Example #1
def parse_userconfig(config_file, executables=None, test_id=None,
        settings=None):
    '''Parse the user options and job types from the userconfig file.

config_file: location of the userconfig file, either relative or absolute.'''

    if executables is None:
        executables = {}

    if not os.path.exists(config_file):
        raise exceptions.TestCodeError(
                'User configuration file %s does not exist.' % (config_file)
                                      )
    # paths to programs can be specified relative to the config
    # file.
    config_directory = os.path.dirname(os.path.abspath(config_file))

    userconfig = compat.configparser.RawConfigParser()
    userconfig.optionxform = str # Case sensitive file.
    userconfig.read(config_file)

    # Alter config file with additional settings provided.
    if settings:
        for (section_key, section) in settings.items():
            for (option_key, value) in section.items():
                userconfig.set(section_key, option_key, value)

    # Sensible defaults for the user options.
    user_options = dict(benchmark=None, date_fmt='%d%m%Y',
            tolerance='(1.e-10,None)', output_files=None, diff='diff')

    if userconfig.has_section('user'):
        user_options.update(dict(userconfig.items('user')))
        userconfig.remove_section('user')
        user_options['tolerance'] = dict(
                (parse_tolerance_tuple(item)
                     for item in eval_nested_tuple(user_options['tolerance']))
                                        )
    else:
        raise exceptions.TestCodeError(
                'user section in userconfig does not exist.'
                                      )

    if not userconfig.sections():
        raise exceptions.TestCodeError(
                'No job types specified in userconfig.'
                                      )

    test_program_options = ('run_cmd_template', 'submit_template',
        'launch_parallel', 'ignore_fields', 'data_tag', 'extract_cmd_template',
        'extract_program', 'extract_args', 'extract_fmt', 'verify', 'vcs',
        'skip_program', 'skip_args', 'skip_cmd_template')
    default_test_options = ('inputs_args', 'output', 'nprocs')
    test_programs = {}
    for section in userconfig.sections():
        tp_dict = {}
        tolerances = copy.deepcopy(user_options['tolerance'])
        # Read in possible TestProgram settings.
        for item in test_program_options:
            if userconfig.has_option(section, item):
                tp_dict[item] = userconfig.get(section, item)
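        # Pick the executable: a per-section entry in executables, else a
        # global '_tc_all' entry, else the literal key 'exe' (which is
        # expanded below if the section defines an option of that name).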
        if section in executables:
            exe = executables[section]
        elif '_tc_all' in executables:
            exe = executables['_tc_all']
        else:
            exe = 'exe'
        if userconfig.has_option(section, exe):
            # exe is set to be a key rather than the path to an executable.
            # Expand.
            exe = userconfig.get(section, exe)
        # Create the default test settings.
        # First, tolerances...
        if userconfig.has_option(section, 'tolerance'):
            for item in (
                    eval_nested_tuple(userconfig.get(section, 'tolerance'))
                        ):
                (name, tol) = parse_tolerance_tuple(item)
                tolerances[name] = tol
        test_dict = dict(
                         default_tolerance=tolerances[None],
                         tolerances=tolerances,
                        )
        # Other settings...
        for item in default_test_options:
            if userconfig.has_option(section, item):
                test_dict[item] = userconfig.get(section, item)
        if userconfig.has_option(section, 'run_concurrent'):
            test_dict['run_concurrent'] = \
                    userconfig.getboolean(section, 'run_concurrent')
        # Programs can be specified relative to the config directory.
        exe = set_program_name(exe, config_directory)
        if 'extract_program' in tp_dict:
            tp_dict['extract_program'] = set_program_name(
                                tp_dict['extract_program'], config_directory)
        if 'skip_program' in tp_dict:
            tp_dict['skip_program'] = set_program_name(
                                tp_dict['skip_program'], config_directory)
        if 'submit_template' in tp_dict:
            tp_dict['submit_template'] = os.path.join(config_directory,
                                                    tp_dict['submit_template'])
        for key in ('nprocs', 'max_nprocs', 'min_nprocs'):
            if key in test_dict:
                test_dict[key] = int(test_dict[key])
        if 'inputs_args' in test_dict:
            # format: (input, arg), (input, arg)
            test_dict['inputs_args'] = (
                    eval_nested_tuple(test_dict['inputs_args']))
        # Create a default test.
        tp_dict['default_test_settings'] = testcode2.Test(None, None, None,
                **test_dict)
        if 'vcs' in tp_dict:
            tp_dict['vcs'] = vcs.VCSRepository(tp_dict['vcs'],
                    os.path.dirname(exe))
        program = testcode2.TestProgram(section, exe, test_id,
            user_options['benchmark'], **tp_dict)
        test_programs[section] = program

    if len(test_programs) == 1:
        # only one program; set default program which helpfully is the most
        # recent value of section from the previous loop.
        user_options['default_program'] = section

    return (user_options, test_programs)
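
Usage sketch (not from the source): a minimal userconfig written and parsed on the fly. The section name 'my_program', the benchmark id and the test id are placeholders chosen to exercise the options read above.

# Hypothetical usage of parse_userconfig; all names and paths are placeholders.
# /bin/sh stands in for a real executable so that path resolution succeeds.
with open('userconfig', 'w') as handle:
    handle.write('''
[user]
benchmark = 1.0.0
tolerance = (1.e-6, None)

[my_program]
exe = /bin/sh
nprocs = 4
''')

user_options, test_programs = parse_userconfig('userconfig',
                                               test_id='2024-01-01')
print(user_options['default_program'])   # 'my_program'
print(sorted(test_programs))             # ['my_program']
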
Example #2
def parse_jobconfig(config_file, user_options, test_programs, settings=None):
    '''Parse the test configurations from the jobconfig file.

config_file: location of the jobconfig file, either relative or absolute.'''

    if not os.path.exists(config_file):
        raise exceptions.TestCodeError(
                'Job configuration file %s does not exist.' % (config_file)
                                      )

    # paths to the test directories can be specified relative to the config
    # file.
    config_directory = os.path.dirname(os.path.abspath(config_file))

    jobconfig = compat.configparser.RawConfigParser()
    jobconfig.optionxform = str # Case sensitive file.
    jobconfig.read(config_file)

    # Alter config file with additional settings provided.
    if settings:
        for (section_key, section) in settings.items():
            for (option_key, value) in section.items():
                jobconfig.set(section_key, option_key, value)

    # Parse job categories.
    # Just store as list of test names for now.
    if jobconfig.has_section('categories'):
        test_categories = dict(jobconfig.items('categories'))
        for (key, val) in test_categories.items():
            test_categories[key] = val.split()
        jobconfig.remove_section('categories')
    else:
        test_categories = {}

    # Parse individual sections for tests.
    # Note that sections/paths may contain globs and hence correspond to
    # multiple tests.
    # First, find out the tests each section corresponds to.
    test_sections = []
    for section in jobconfig.sections():
        # Expand any globs in the path/section name and create individual Test
        # objects for each one.
        if jobconfig.has_option(section, 'path'):
            path = os.path.join(config_directory,
                                jobconfig.get(section, 'path'))
            jobconfig.remove_option(section, 'path')
            globbed_tests = [(section, test_path)
                                            for test_path in glob.glob(path)]
        else:
            path = os.path.join(config_directory, section)
            globbed_tests = [(test_path, test_path)
                                            for test_path in glob.glob(path)]
        test_sections.append((section, globbed_tests))
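    # Handle the sections matching the most tests first, so that more specific
    # sections (processed later) can update settings already set by globs.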
    test_sections.sort(key=lambda sec_info: len(sec_info[1]), reverse=True)
    test_info = {}
    for (section, globbed_tests) in test_sections:
        test_dict = {}
        # test program
        if jobconfig.has_option(section, 'program'):
            test_program = test_programs[jobconfig.get(section, 'program')]
            # Remove the option so the catch-all loop below does not pass it
            # to the Test constructor.
            jobconfig.remove_option(section, 'program')
        else:
            test_program = test_programs[user_options['default_program']]
        # tolerances
        if jobconfig.has_option(section, 'tolerance'):
            test_dict['tolerances'] = {}
            for item in (
                    eval_nested_tuple(jobconfig.get(section,'tolerance'))
                        ):
                (name, tol) = parse_tolerance_tuple(item)
                test_dict['tolerances'][name] = tol
            jobconfig.remove_option(section, 'tolerance')
            if None in test_dict['tolerances']:
                test_dict['default_tolerance'] = test_dict['tolerances'][None]
        # inputs and arguments
        if jobconfig.has_option(section, 'inputs_args'):
            # format: (input, arg), (input, arg)
            test_dict['inputs_args'] = (
                    eval_nested_tuple(jobconfig.get(section, 'inputs_args')))
            jobconfig.remove_option(section, 'inputs_args')
        if jobconfig.has_option(section, 'run_concurrent'):
            test_dict['run_concurrent'] = \
                    jobconfig.getboolean(section, 'run_concurrent')
            jobconfig.remove_option(section, 'run_concurrent')
        # Other options.
        for option in jobconfig.options(section):
            test_dict[option] = jobconfig.get(section, option)
        for key in ('nprocs', 'max_nprocs', 'min_nprocs'):
            if key in test_dict:
                test_dict[key] = int(test_dict[key])
        for (name, path) in globbed_tests:
            # Need to take care with tolerances: want to *update* existing
            # tolerance dictionary rather than overwrite it.
            # This means we can't just use test_dict to update the relevant
            # dictionary in test_info.
            tol = None
            if name in test_info:
                # Just update existing info.
                test = test_info[name]
                if 'tolerances' in test_dict:
                    test[2]['tolerances'].update(test_dict['tolerances'])
                    tol = test_dict.pop('tolerances')
                test[0] = test_program
                test[1] = path
                test[2].update(test_dict)
                if tol:
                    test_dict['tolerances'] = tol
            else:
                # Create new test_info value.
                # Merge with default values.
                # Default test options.
                default_test = test_program.default_test_settings
                test = dict(
                        inputs_args=default_test.inputs_args,
                        output=default_test.output,
                        default_tolerance=default_test.default_tolerance,
                        tolerances=copy.deepcopy(default_test.tolerances),
                        nprocs=default_test.nprocs,
                        min_nprocs=default_test.min_nprocs,
                        max_nprocs=default_test.max_nprocs,
                        run_concurrent=default_test.run_concurrent,
                    )
                if 'tolerances' in test_dict:
                    test['tolerances'].update(test_dict['tolerances'])
                    tol = test_dict.pop('tolerances')
                test.update(test_dict)
                # restore tolerances for next test in the glob.
                if tol:
                    test_dict['tolerances'] = tol
                test_info[name] = [test_program, path, copy.deepcopy(test)]

    # Now create the tests (after finding out what the input files are).
    tests = []
    for (name, (test_program, path, test_dict)) in test_info.items():
        old_dir = os.getcwd()
        os.chdir(path)
        # Expand any globs in the input files.
        inputs_args = []
        for input_arg in test_dict['inputs_args']:
            # Be a little forgiving for the input_args config option.
            # If we're given ('input'), then clearly the user meant for the
            # args option to be empty.  However, literal_eval returns
            # a string rather than a tuple in such cases, which causes
            # problems.
            if isinstance(input_arg, str):
                inp = input_arg
                arg = ''
            elif len(input_arg) == 2:
                inp = input_arg[0]
                arg = input_arg[1]
            else:
                inp = input_arg[0]
                arg = ''
            if inp:
                # the test, error and benchmark filenames contain the input
                # filename, so we need to filter them out.
                inp_files = sorted(glob.glob(inp))
                if not inp_files:
                    err = 'Cannot find input file %s in %s.' % (inp, path)
                    raise exceptions.TestCodeError(err)
                for inp_file in inp_files:
                    # We use a glob for the input argument to avoid the
                    # case where the argument is empty and hence a pattern
                    # such as *.inp also matches files like
                    # test.out.test_id.inp=x.inp and hence considering
                    # previous output files to actually be an input file in
                    # their own right.
                    test_files = [
                         util.testcode_filename(stem[1], '*', '*', arg)
                         for stem in testcode2._FILESTEM_TUPLE
                                 ]
                    testcode_files = []
                    for tc_file in test_files:
                        testcode_files.extend(glob.glob(tc_file))
                    if inp_file not in testcode_files:
                        inputs_args.append((inp_file, arg))
            else:
                inputs_args.append((inp, arg))
        test_dict['inputs_args'] = tuple(inputs_args)
        os.chdir(old_dir)
        # Create test.
        if test_dict['run_concurrent']:
            for input_arg in test_dict['inputs_args']:
                test_dict['inputs_args'] = (input_arg,)
                tests.append(testcode2.Test(name, test_program, path,
                                            **test_dict))
        else:
            tests.append(testcode2.Test(name, test_program, path, **test_dict))

    return (tests, test_categories)
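
Follow-on usage sketch (not from the source), reusing user_options and test_programs from the parse_userconfig sketch above. The test directory, input file and category names are placeholders; the section name doubles as the test path relative to the jobconfig file.

# Hypothetical usage of parse_jobconfig; directory and file names are placeholders.
import os

os.makedirs('carbon_dimer', exist_ok=True)
open(os.path.join('carbon_dimer', 'dimer.inp'), 'w').close()

with open('jobconfig', 'w') as handle:
    handle.write('''
[carbon_dimer]
inputs_args = ('*.inp', '')
nprocs = 2

[categories]
quick = carbon_dimer
''')

tests, test_categories = parse_jobconfig('jobconfig', user_options,
                                         test_programs)
print(len(tests))        # 1
print(test_categories)   # {'quick': ['carbon_dimer']}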