def __init__(self, absolute=None, relative=None, strict=True):
    self.absolute = absolute
    self.relative = relative
    if not self.absolute and not self.relative:
        err = 'Neither absolute nor relative tolerance given.'
        raise exceptions.TestCodeError(err)
    self.strict = strict
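# Usage sketch (hedged): these excerpts assume the module-level imports of the
# original source (os, glob, time, copy, sys, warnings, importlib, plus the
# package's util, exceptions, compat, vcs and testcode2 modules) and that this
# __init__ belongs to the package's Tolerance class. Values are invented:
#
#     tol = Tolerance(absolute=1.e-10)                # absolute check only
#     tol = Tolerance(relative=1.e-6, strict=False)   # relative, non-strict
#     tol = Tolerance()                               # raises TestCodeError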
def select_benchmark_file(self, path, input_file, args):
    '''Find the first benchmark file out of all benchmark IDs which exists.'''
    benchmark = None
    benchmarks = []
    for bench_id in self.benchmark:
        benchfile = util.testcode_filename(FILESTEM['benchmark'], bench_id,
                                           input_file, args)
        benchmarks.append(benchfile)
        if os.path.exists(os.path.join(path, benchfile)):
            benchmark = benchfile
            break
    if not benchmark:
        err = 'No benchmark found in %s. Checked for: %s.'
        raise exceptions.TestCodeError(err % (path, ', '.join(benchmarks)))
    return benchmark
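# Illustrative behaviour (hedged; the benchmark IDs are invented): with
# self.benchmark = ['dd1e3c0', 'a4f9c2b'], the candidate filenames generated
# by util.testcode_filename are checked in order and the first one that exists
# in `path` is returned; if none exist, the TestCodeError lists every
# candidate that was tried.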
def __init__(self, name, exe, test_id, benchmark, **kwargs):
    # Set sane defaults (mostly null) for keyword arguments.
    self.name = name
    # Running
    self.exe = exe
    self.test_id = test_id
    self.run_cmd_template = ('tc.program tc.args tc.input > '
                             'tc.output 2> tc.error')
    self.launch_parallel = 'mpirun -np tc.nprocs'
    self.submit_template = None
    self.submit_pattern = 'testcode.run_cmd'
    # dummy job with default settings (e.g. tolerance)
    self.default_test_settings = None
    # Analysis
    self.benchmark = benchmark
    self.ignore_fields = []
    self.data_tag = None
    self.extract_cmd_template = 'tc.extract tc.args tc.file'
    self.extract_program = None
    self.extract_args = ''
    self.extract_fmt = 'table'
    self.skip_cmd_template = 'tc.skip tc.args tc.test'
    self.skip_program = None
    self.skip_args = ''
    self.verify = False
    # Info
    self.vcs = None

    # Set values passed in as keyword options.
    for (attr, val) in kwargs.items():
        setattr(self, attr, val)

    # If using an external verification program, then set the default
    # extract command template.
    if self.verify and 'extract_cmd_template' not in kwargs:
        self.extract_cmd_template = 'tc.extract tc.args tc.test tc.bench'

    # Can we actually extract the data?
    if self.extract_fmt == 'yaml' and not _HAVE_YAML:
        err = 'YAML data format cannot be used: PyYAML is not installed.'
        raise exceptions.TestCodeError(err)
def get_unique_test_id(tests, reuse_id=False, date_fmt='%d%m%Y'):
    '''Find a unique test id based upon the date and previously run tests.'''
    todays_id = time.strftime(date_fmt)
    newest_file = None
    test_id = '0'*len(todays_id)
    for test in tests:
        test_globs = glob.glob('%s*' %
                os.path.join(test.path, testcode2.FILESTEM['test']))
        for test_file in test_globs:
            # os.stat(...)[-2] is st_mtime: find the most recently modified
            # test file.
            if (not newest_file or
                    os.stat(test_file)[-2] > os.stat(newest_file)[-2]):
                newest_file = test_file
                # Keep track of the latest file with today's test_id (in case
                # the most recent test was run with a user-specified test_id).
                newest_test_id = util.testcode_file_id(
                        newest_file, testcode2.FILESTEM['test'])
                if newest_test_id[:len(todays_id)] == todays_id:
                    test_id = newest_test_id
    if reuse_id:
        # Want test_id to be the most recent set of tests.
        if not newest_file:
            err = 'Cannot find any previous test outputs.'
            raise exceptions.TestCodeError(err)
        test_id = util.testcode_file_id(newest_file,
                                        testcode2.FILESTEM['test'])
    elif test_id[:len(todays_id)] == todays_id:
        # Have already run at least one test today; create a unique id.
        if len(test_id) == len(todays_id):
            test_id = 1
        else:
            test_id = int(test_id[len(todays_id)+1:]) + 1
        test_id = '%s-%s' % (todays_id, test_id)
    else:
        # First test of the day!
        test_id = todays_id
    return test_id
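# Example of the resulting IDs (hedged; the dates are invented). With the
# default date_fmt, the first run on 15 May 2013 yields test_id '15052013',
# the second '15052013-1', the third '15052013-2', and so on; with
# reuse_id=True the test_id of the most recent existing output is returned
# instead.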
def __init__(self, name, exe, test_id, benchmark, **kwargs):
    # Set sane defaults (mostly null) for keyword arguments.
    self.name = name
    # Running
    self.exe = exe
    self.test_id = test_id
    self.run_cmd_template = ('tc.program tc.args tc.input > '
                             'tc.output 2> tc.error')
    self.launch_parallel = 'mpirun -np tc.nprocs'
    self.submit_pattern = 'testcode.run_cmd'
    # dummy job with default settings (e.g. tolerance)
    self.default_test_settings = None
    # Analysis
    self.benchmark = benchmark
    self.ignore_fields = []
    self.data_tag = None
    self.extract_cmd_template = 'tc.extract tc.args tc.file'
    self.extract_program = None
    self.extract_args = ''
    self.extract_fmt = 'table'
    self.skip_cmd_template = 'tc.skip tc.args tc.test'
    self.skip_program = None
    self.skip_args = ''
    self.verify = False
    self.extract_fn = None
    # Info
    self.vcs = None

    # Set values passed in as keyword options.
    for (attr, val) in kwargs.items():
        setattr(self, attr, val)

    # If using an external verification program, then set the default
    # extract command template.
    if self.verify and 'extract_cmd_template' not in kwargs:
        self.extract_cmd_template = 'tc.extract tc.args tc.test tc.bench'

    # Resolve extract_fn to a callable: either 'module.function' or
    # 'directory module.function' (the directory is prepended to sys.path).
    if self.extract_fn:
        if _HAVE_IMPORTLIB_:
            self.extract_fn = self.extract_fn.split()
            if len(self.extract_fn) == 2:
                sys.path.append(self.extract_fn[0])
            (mod, fn) = self.extract_fn[-1].rsplit('.', 1)
            mod = importlib.import_module(mod)
            self.extract_fn = getattr(mod, fn)
        elif self.extract_program:
            warnings.warn('importlib not available. Will attempt to '
                          'analyse data via an external script.')
            self.extract_fn = None
        else:
            raise exceptions.TestCodeError('importlib not available and '
                                           'no data extraction program supplied.')

    # Can we actually extract the data?
    if self.extract_fmt == 'yaml' and not _HAVE_YAML:
        err = 'YAML data format cannot be used: PyYAML is not installed.'
        raise exceptions.TestCodeError(err)
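# Illustrative extract_fn values (hedged; the module and function names are
# invented). The resolution logic above accepts either a bare dotted name or
# a directory, prepended to sys.path, followed by a dotted name:
#
#     extract_fn = my_analysis.extract
#     extract_fn = /path/to/scripts my_analysis.extract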
def parse_userconfig(config_file, executables=None, test_id=None,
        settings=None):
    '''Parse the user options and job types from the userconfig file.

    config_file: location of the userconfig file, either relative or absolute.
    '''
    if executables is None:
        executables = {}

    if not os.path.exists(config_file):
        raise exceptions.TestCodeError(
                'User configuration file %s does not exist.' % (config_file)
                )
    # Paths to programs can be specified relative to the config file.
    config_directory = os.path.dirname(os.path.abspath(config_file))

    userconfig = compat.configparser.RawConfigParser()
    userconfig.optionxform = str  # Case sensitive file.
    userconfig.read(config_file)

    # Alter config file with additional settings provided.
    if settings:
        for (section_key, section) in settings.items():
            for (option_key, value) in section.items():
                userconfig.set(section_key, option_key, value)

    # Sensible defaults for the user options.
    user_options = dict(benchmark=None, date_fmt='%d%m%Y',
            tolerance='(1.e-10,None)', output_files=None, diff='diff')

    if userconfig.has_section('user'):
        user_options.update(dict(userconfig.items('user')))
        userconfig.remove_section('user')
        user_options['tolerance'] = dict(
                (parse_tolerance_tuple(item)
                    for item in eval_nested_tuple(user_options['tolerance']))
                )
    else:
        raise exceptions.TestCodeError(
                'user section in userconfig does not exist.'
                )

    if not userconfig.sections():
        raise exceptions.TestCodeError(
                'No job types specified in userconfig.'
                )

    test_program_options = ('run_cmd_template', 'submit_template',
            'launch_parallel', 'ignore_fields', 'data_tag',
            'extract_cmd_template', 'extract_program', 'extract_args',
            'extract_fmt', 'verify', 'vcs', 'skip_program', 'skip_args',
            'skip_cmd_template')
    default_test_options = ('inputs_args', 'output', 'nprocs')
    test_programs = {}
    for section in userconfig.sections():
        tp_dict = {}
        tolerances = copy.deepcopy(user_options['tolerance'])
        # Read in possible TestProgram settings.
        for item in test_program_options:
            if userconfig.has_option(section, item):
                tp_dict[item] = userconfig.get(section, item)
        if section in executables:
            exe = executables[section]
        elif '_tc_all' in executables:
            exe = executables['_tc_all']
        else:
            exe = 'exe'
        if userconfig.has_option(section, exe):
            # exe is set to be a key rather than the path to an executable.
            # Expand.
            exe = userconfig.get(section, exe)
        # Create default test settings.
        # First, tolerances...
        if userconfig.has_option(section, 'tolerance'):
            for item in (
                    eval_nested_tuple(userconfig.get(section, 'tolerance'))
                    ):
                (name, tol) = parse_tolerance_tuple(item)
                tolerances[name] = tol
        test_dict = dict(
                default_tolerance=tolerances[None],
                tolerances=tolerances,
                )
        # Other settings...
        for item in default_test_options:
            if userconfig.has_option(section, item):
                test_dict[item] = userconfig.get(section, item)
        if userconfig.has_option(section, 'run_concurrent'):
            test_dict['run_concurrent'] = \
                    userconfig.getboolean(section, 'run_concurrent')
        # Programs can be specified relative to the config directory.
        exe = set_program_name(exe, config_directory)
        if 'extract_program' in tp_dict:
            tp_dict['extract_program'] = set_program_name(
                    tp_dict['extract_program'], config_directory)
        if 'skip_program' in tp_dict:
            tp_dict['skip_program'] = set_program_name(
                    tp_dict['skip_program'], config_directory)
        if 'submit_template' in tp_dict:
            tp_dict['submit_template'] = os.path.join(config_directory,
                    tp_dict['submit_template'])
        for key in ('nprocs', 'max_nprocs', 'min_nprocs'):
            if key in test_dict:
                test_dict[key] = int(test_dict[key])
        if 'inputs_args' in test_dict:
            # Format: (input, arg), (input, arg)
            test_dict['inputs_args'] = (
                    eval_nested_tuple(test_dict['inputs_args']))
        # Create a default test.
        tp_dict['default_test_settings'] = testcode2.Test(None, None, None,
                **test_dict)
        if 'vcs' in tp_dict:
            tp_dict['vcs'] = vcs.VCSRepository(tp_dict['vcs'],
                    os.path.dirname(exe))
        program = testcode2.TestProgram(section, exe, test_id,
                user_options['benchmark'], **tp_dict)
        test_programs[section] = program

    if len(test_programs) == 1:
        # Only one program; set default program, which helpfully is the most
        # recent value of section from the previous loop.
        user_options['default_program'] = section

    return (user_options, test_programs)
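# Illustrative userconfig (a hedged sketch of the format this function parses;
# the [mycode] section name and all values are invented):
#
#     [user]
#     benchmark = 8f30ed5
#     date_fmt = %d%m%Y
#
#     [mycode]
#     exe = /path/to/mycode.x
#     nprocs = 4
#     tolerance = (1.e-6, None)
#
# parse_userconfig('userconfig') then returns the user options and a
# TestProgram instance for each program section (here, just [mycode]).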
def parse_jobconfig(config_file, user_options, test_programs, settings=None):
    '''Parse the test configurations from the jobconfig file.

    config_file: location of the jobconfig file, either relative or absolute.
    '''
    if not os.path.exists(config_file):
        raise exceptions.TestCodeError(
                'Job configuration file %s does not exist.' % (config_file)
                )
    # Paths to the test directories can be specified relative to the config
    # file.
    config_directory = os.path.dirname(os.path.abspath(config_file))

    jobconfig = compat.configparser.RawConfigParser()
    jobconfig.optionxform = str  # Case sensitive file.
    jobconfig.read(config_file)

    # Alter config file with additional settings provided.
    if settings:
        for (section_key, section) in settings.items():
            for (option_key, value) in section.items():
                jobconfig.set(section_key, option_key, value)

    # Parse job categories.
    # Just store as list of test names for now.
    if jobconfig.has_section('categories'):
        test_categories = dict(jobconfig.items('categories'))
        for (key, val) in test_categories.items():
            test_categories[key] = val.split()
        jobconfig.remove_section('categories')
    else:
        test_categories = {}

    # Parse individual sections for tests.
    # Note that sections/paths may contain globs and hence correspond to
    # multiple tests.
    # First, find out the tests each section corresponds to.
    test_sections = []
    for section in jobconfig.sections():
        # Expand any globs in the path/section name and create individual Test
        # objects for each one.
        if jobconfig.has_option(section, 'path'):
            path = os.path.join(config_directory,
                                jobconfig.get(section, 'path'))
            jobconfig.remove_option(section, 'path')
            globbed_tests = [(section, test_path)
                             for test_path in glob.glob(path)]
        else:
            path = os.path.join(config_directory, section)
            globbed_tests = [(test_path, test_path)
                             for test_path in glob.glob(path)]
        test_sections.append((section, globbed_tests))
    test_sections.sort(key=lambda sec_info: len(sec_info[1]), reverse=True)
    test_info = {}
    for (section, globbed_tests) in test_sections:
        test_dict = {}
        # test program
        if jobconfig.has_option(section, 'program'):
            test_program = test_programs[jobconfig.get(section, 'program')]
        else:
            test_program = test_programs[user_options['default_program']]
        # tolerances
        if jobconfig.has_option(section, 'tolerance'):
            test_dict['tolerances'] = {}
            for item in (
                    eval_nested_tuple(jobconfig.get(section, 'tolerance'))
                    ):
                (name, tol) = parse_tolerance_tuple(item)
                test_dict['tolerances'][name] = tol
            jobconfig.remove_option(section, 'tolerance')
            if None in test_dict['tolerances']:
                test_dict['default_tolerance'] = test_dict['tolerances'][None]
        # inputs and arguments
        if jobconfig.has_option(section, 'inputs_args'):
            # Format: (input, arg), (input, arg)
            test_dict['inputs_args'] = (
                    eval_nested_tuple(jobconfig.get(section, 'inputs_args')))
            jobconfig.remove_option(section, 'inputs_args')
        if jobconfig.has_option(section, 'run_concurrent'):
            test_dict['run_concurrent'] = \
                    jobconfig.getboolean(section, 'run_concurrent')
            jobconfig.remove_option(section, 'run_concurrent')
        # Other options.
        for option in jobconfig.options(section):
            test_dict[option] = jobconfig.get(section, option)
        for key in ('nprocs', 'max_nprocs', 'min_nprocs'):
            if key in test_dict:
                test_dict[key] = int(test_dict[key])
        for (name, path) in globbed_tests:
            # Need to take care with tolerances: want to *update* existing
            # tolerance dictionary rather than overwrite it.
            # This means we can't just use test_dict to update the relevant
            # dictionary in test_info.
            tol = None
            if name in test_info:
                # Just update existing info.
                test = test_info[name]
                if 'tolerances' in test_dict:
                    test[2]['tolerances'].update(test_dict['tolerances'])
                    tol = test_dict.pop('tolerances')
                test[0] = test_program
                test[1] = path
                test[2].update(test_dict)
                if tol:
                    test_dict['tolerances'] = tol
            else:
                # Create new test_info value.
                # Merge with default values.
                # Default test options.
                default_test = test_program.default_test_settings
                test = dict(
                        inputs_args=default_test.inputs_args,
                        output=default_test.output,
                        default_tolerance=default_test.default_tolerance,
                        tolerances=copy.deepcopy(default_test.tolerances),
                        nprocs=default_test.nprocs,
                        min_nprocs=default_test.min_nprocs,
                        max_nprocs=default_test.max_nprocs,
                        run_concurrent=default_test.run_concurrent,
                        )
                if 'tolerances' in test_dict:
                    test['tolerances'].update(test_dict['tolerances'])
                    tol = test_dict.pop('tolerances')
                test.update(test_dict)
                # Restore tolerances for next test in the glob.
                if tol:
                    test_dict['tolerances'] = tol
                test_info[name] = [test_program, path, copy.deepcopy(test)]

    # Now create the tests (after finding out what the input files are).
    tests = []
    for (name, (test_program, path, test_dict)) in test_info.items():
        old_dir = os.getcwd()
        os.chdir(path)
        # Expand any globs in the input files.
        inputs_args = []
        for input_arg in test_dict['inputs_args']:
            # Be a little forgiving for the inputs_args config option.
            # If we're given ('input'), then clearly the user meant for the
            # args option to be empty.  However, literal_eval returns
            # a string rather than a tuple in such cases, which causes
            # problems.
            if isinstance(input_arg, str):
                inp = input_arg
                arg = ''
            elif len(input_arg) == 2:
                inp = input_arg[0]
                arg = input_arg[1]
            else:
                inp = input_arg[0]
                arg = ''
            if inp:
                # The test, error and benchmark filenames contain the input
                # filename, so we need to filter them out.
                inp_files = sorted(glob.glob(inp))
                if not inp_files:
                    err = 'Cannot find input file %s in %s.' % (inp, path)
                    raise exceptions.TestCodeError(err)
                for inp_file in inp_files:
                    # We use a glob for the input argument to avoid the
                    # case where the argument is empty and hence a pattern
                    # such as *.inp also matches files like
                    # test.out.test_id.inp=x.inp and hence considering
                    # previous output files to actually be an input file in
                    # their own right.
                    test_files = [
                            util.testcode_filename(stem[1], '*', '*', arg)
                            for stem in testcode2._FILESTEM_TUPLE
                            ]
                    testcode_files = []
                    for tc_file in test_files:
                        testcode_files.extend(glob.glob(tc_file))
                    if inp_file not in testcode_files:
                        inputs_args.append((inp_file, arg))
            else:
                inputs_args.append((inp, arg))
        test_dict['inputs_args'] = tuple(inputs_args)
        os.chdir(old_dir)
        # Create test.
        if test_dict['run_concurrent']:
            for input_arg in test_dict['inputs_args']:
                test_dict['inputs_args'] = (input_arg,)
                tests.append(testcode2.Test(name, test_program, path,
                                            **test_dict))
        else:
            tests.append(testcode2.Test(name, test_program, path,
                                        **test_dict))

    return (tests, test_categories)
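# Illustrative jobconfig (a hedged sketch of the format this function parses;
# the test name, input filename and category name are invented):
#
#     [carbon_dioxide]
#     nprocs = 2
#     inputs_args = ('co2.inp', '')
#
#     [categories]
#     short = carbon_dioxide
#
# Each section names a test directory (globs are allowed, and an explicit
# path option may override the section name); [categories] groups tests by
# name for selective runs.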