Example #1
    def run_cmd(self, input_file, args, nprocs=0):
        '''Create run command.'''
        output_file = util.testcode_filename(FILESTEM['test'], self.test_id,
                                             input_file, args)
        error_file = util.testcode_filename(FILESTEM['error'], self.test_id,
                                            input_file, args)

        # Need to escape filenames for passing them to the shell.
        exe = pipes.quote(self.exe)
        output_file = pipes.quote(output_file)
        error_file = pipes.quote(error_file)

        cmd = self.run_cmd_template.replace('tc.program', exe)
        if type(input_file) is str:
            input_file = pipes.quote(input_file)
            cmd = cmd.replace('tc.input', input_file)
        else:
            cmd = cmd.replace('tc.input', '')
        if type(args) is str:
            cmd = cmd.replace('tc.args', args)
        else:
            cmd = cmd.replace('tc.args', '')
        cmd = cmd.replace('tc.output', output_file)
        cmd = cmd.replace('tc.error', error_file)
        if nprocs > 0 and self.launch_parallel:
            cmd = '%s %s' % (self.launch_parallel, cmd)
        cmd = cmd.replace('tc.nprocs', str(nprocs))
        return cmd
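
For illustration, the placeholder substitution above can be exercised standalone. The following is a minimal sketch with a made-up template and filenames; `shlex.quote` is the Python 3 replacement for the deprecated `pipes.quote` used in the example.

    # Minimal sketch of the tc.* placeholder substitution.  The template,
    # executable name and filenames are invented for demonstration.
    import shlex

    template = 'tc.program tc.input tc.args > tc.output 2> tc.error'
    cmd = template.replace('tc.program', shlex.quote('./my_code.x'))
    cmd = cmd.replace('tc.input', shlex.quote('case1.inp'))
    cmd = cmd.replace('tc.args', '--fast')
    cmd = cmd.replace('tc.output', shlex.quote('case1.out'))
    cmd = cmd.replace('tc.error', shlex.quote('case1.err'))
    print(cmd)
    # ./my_code.x case1.inp --fast > case1.out 2> case1.err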
Example #2
    def create_new_benchmarks(self, benchmark, copy_files_since=None,
            copy_files_path='testcode_data'):
        '''Copy the test files to benchmark files.'''

        oldcwd = os.getcwd()
        os.chdir(self.path)

        test_files = []
        for (inp, arg) in self.inputs_args:
            test_file = util.testcode_filename(FILESTEM['test'],
                    self.test_program.test_id, inp, arg)
            err_file = util.testcode_filename(FILESTEM['error'],
                    self.test_program.test_id, inp, arg)
            bench_file = util.testcode_filename(_FILESTEM_DICT['benchmark'],
                    benchmark, inp, arg)
            test_files.extend((test_file, err_file, bench_file))
            shutil.copy(test_file, bench_file)

        if copy_files_since:
            if not os.path.isdir(copy_files_path):
                os.mkdir(copy_files_path)
            if os.path.isdir(copy_files_path):
                for data_file in glob.glob('*'):
                    if (os.path.isfile(data_file) and
                            os.stat(data_file).st_mtime >= copy_files_since and
                            data_file not in test_files):
                        bench_data_file = os.path.join(copy_files_path,
                                data_file)
                        # shutil.copy can't overwrite files so remove old ones
                        # with the same name.
                        if os.path.exists(bench_data_file):
                            os.unlink(bench_data_file)
                        shutil.copy(data_file, bench_data_file)

        os.chdir(oldcwd)
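
The `copy_files_since` filter above keys off each file's modification time. A minimal sketch of the same idea, assuming an invented cut-off timestamp and destination directory:

    # Copy any plain file modified at or after a cut-off time into a data
    # directory.  The timestamp and directory name are invented.
    import glob
    import os
    import shutil

    copy_files_since = 1700000000.0   # hypothetical epoch timestamp
    dest = 'testcode_data'
    os.makedirs(dest, exist_ok=True)
    for data_file in glob.glob('*'):
        if (os.path.isfile(data_file) and
                os.stat(data_file).st_mtime >= copy_files_since):
            shutil.copy(data_file, os.path.join(dest, data_file))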
Example #3
    def extract_data(self, input_file, args, verbose=1):
        '''Extract data from output file.

Assume function is executed in self.path.'''
        tp_ptr = self.test_program
        if tp_ptr.data_tag:
            # Using internal data extraction function.
            data_files = [
                    util.testcode_filename(FILESTEM['benchmark'],
                            tp_ptr.benchmark, input_file, args),
                    util.testcode_filename(FILESTEM['test'],
                            tp_ptr.test_id, input_file, args),
                         ]
            if verbose > 2:
                print('Analysing output using data_tag %s in %s on files %s.' %
                        (tp_ptr.data_tag, self.path, ' and '.join(data_files)))
            outputs = [util.extract_tagged_data(tp_ptr.data_tag, dfile)
                    for dfile in data_files]
        else:
            # Using external data extraction script.
            # Get extraction commands.
            extract_cmds = self.test_program.extract_cmd(input_file, args)

            # Extract data.
            outputs = []
            for cmd in extract_cmds:
                try:
                    if verbose > 2:
                        print('Analysing output using %s in %s.' %
                                (cmd, self.path))
                    extract_popen = subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    # communicate() drains both pipes to EOF and then waits,
                    # avoiding the deadlock that wait() causes once a pipe
                    # buffer fills.
                    (stdout, stderr) = extract_popen.communicate()
                except OSError:
                    # slightly odd syntax in order to be compatible with python
                    # 2.5 and python 2.6/3
                    err = 'Analysing output failed: %s' % (sys.exc_info()[1],)
                    raise exceptions.AnalysisError(err)
                # Convert data string from extract command to dictionary format.
                if extract_popen.returncode != 0:
                    err = 'Analysing output failed: %s' % (stderr.decode('utf-8'))
                    raise exceptions.AnalysisError(err)
                data_string = stdout.decode('utf-8')
                if self.test_program.extract_fmt == 'table':
                    outputs.append(util.dict_table_string(data_string))
                elif self.test_program.extract_fmt == 'yaml':
                    outputs.append({})
                    # convert values to be in a tuple so the format matches
                    # that from dict_table_string.
                    # ensure all keys are strings so they can be sorted
                    # (different data types cause problems!)
                    for (key, val) in yaml.safe_load(data_string).items():
                        if isinstance(val, list):
                            outputs[-1][str(key)] = tuple(val)
                        else:
                            outputs[-1][str(key)] = tuple((val,))

        return tuple(outputs)
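
The YAML branch above normalises every value into a tuple and every key into a string so the result compares like the table-format output. A standalone sketch of that step (requires PyYAML; the document string is invented):

    import yaml

    data_string = 'energy: -1.5\nsamples: [10, 20, 30]\n'
    normalised = {}
    for (key, val) in yaml.safe_load(data_string).items():
        # Lists become tuples; scalars become one-element tuples.
        normalised[str(key)] = tuple(val) if isinstance(val, list) else (val,)
    print(normalised)
    # {'energy': (-1.5,), 'samples': (10, 20, 30)}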
Example #4
 def skip_cmd(self, input_file, args):
     '''Create skip command.'''
     test_file = util.testcode_filename(FILESTEM['test'], self.test_id,
                                        input_file, args)
     error_file = util.testcode_filename(FILESTEM['error'], self.test_id,
                                         input_file, args)
     cmd = self.skip_cmd_template
     cmd = cmd.replace('tc.skip', pipes.quote(self.skip_program))
     cmd = cmd.replace('tc.args', self.skip_args)
     cmd = cmd.replace('tc.test', pipes.quote(test_file))
     cmd = cmd.replace('tc.error', pipes.quote(error_file))
     return cmd
Example #5
    def extract_data(self, input_file, args, verbose=1):
        '''Extract data from output file.

Assume function is executed in self.path.'''
        tp_ptr = self.test_program
        if tp_ptr.data_tag:
            # Using internal data extraction function.
            data_files = [
                    tp_ptr.select_benchmark_file(self.path, input_file, args),
                    util.testcode_filename(FILESTEM['test'],
                            tp_ptr.test_id, input_file, args),
                         ]
            if verbose > 2:
                print('Analysing output using data_tag %s in %s on files %s.' %
                        (tp_ptr.data_tag, self.path, ' and '.join(data_files)))
            outputs = [util.extract_tagged_data(tp_ptr.data_tag, dfile)
                    for dfile in data_files]
        else:
            # Using external data extraction script.
            # Get extraction commands.
            extract_cmds = tp_ptr.extract_cmd(self.path, input_file, args)

            # Extract data.
            outputs = []
            for cmd in extract_cmds:
                try:
                    if verbose > 2:
                        print('Analysing output using %s in %s.' %
                                (cmd, self.path))
                    extract_popen = subprocess.run(cmd, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                except OSError:
                    err = 'Analysing output failed: %s' % (sys.exc_info()[1],)
                    raise exceptions.AnalysisError(err)
                # Convert data string from extract command to dictionary format.
                # subprocess.run returns a CompletedProcess, which has no
                # communicate method; read the captured stderr directly.
                if extract_popen.returncode != 0:
                    err = extract_popen.stderr.decode('utf-8')
                    err = 'Analysing output failed: %s' % (err)
                    raise exceptions.AnalysisError(err)
                data_string = extract_popen.stdout.decode('utf-8')
                if self.test_program.extract_fmt == 'table':
                    outputs.append(util.dict_table_string(data_string))
                elif self.test_program.extract_fmt == 'yaml':
                    outputs.append({})
                    # convert values to be in a tuple so the format matches
                    # that from dict_table_string.
                    # ensure all keys are strings so they can be sorted
                    # (different data types cause problems!)
                    for (key, val) in yaml.safe_load(data_string).items():
                        if isinstance(val, list):
                            outputs[-1][str(key)] = tuple(val)
                        else:
                            outputs[-1][str(key)] = tuple((val,))

        return tuple(outputs)
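
This variant relies on `subprocess.run` (Python 3.5+), which returns a CompletedProcess carrying `returncode`, `stdout` and `stderr` directly. A minimal sketch of the pattern with an arbitrary shell command:

    import subprocess

    # run() waits for completion and captures both streams; no deadlock risk.
    result = subprocess.run('echo data: 1', shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if result.returncode != 0:
        raise RuntimeError(result.stderr.decode('utf-8'))
    print(result.stdout.decode('utf-8'))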
Example #6
 def extract_cmd(self, input_file, args):
     '''Create extraction command(s).'''
     test_file = util.testcode_filename(FILESTEM['test'], self.test_id,
                                        input_file, args)
     bench_file = util.testcode_filename(FILESTEM['benchmark'],
                                         self.benchmark, input_file, args)
     cmd = self.extract_cmd_template
     cmd = cmd.replace('tc.extract', pipes.quote(self.extract_program))
     cmd = cmd.replace('tc.args', self.extract_args)
     if self.verify:
         # Single command to compare benchmark and test outputs.
         cmd = cmd.replace('tc.test', pipes.quote(test_file))
         cmd = cmd.replace('tc.bench', pipes.quote(bench_file))
         return (cmd, )
     else:
         # Need to return commands to extract data from the test and
         # benchmark outputs.
         test_cmd = cmd.replace('tc.file', pipes.quote(test_file))
         bench_cmd = cmd.replace('tc.file', pipes.quote(bench_file))
         return (bench_cmd, test_cmd)
Example #7
    def select_benchmark_file(self, path, input_file, args):
        '''Find the first benchmark file out of all benchmark IDs which exists.'''

        benchmark = None
        benchmarks = []
        for bench_id in self.benchmark:
            benchfile = util.testcode_filename(FILESTEM['benchmark'], bench_id,
                                               input_file, args)
            benchmarks.append(benchfile)
            if os.path.exists(os.path.join(path, benchfile)):
                benchmark = benchfile
                break
        if not benchmark:
            err = 'No benchmark found in %s.  Checked for: %s.'
            raise exceptions.TestCodeError(err % (path, ', '.join(benchmarks)))
        return benchmark
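
The core of the lookup above is a first-match scan over candidate filenames. A minimal sketch under the same assumptions, with an invented helper name and invented candidate filenames:

    import os

    def first_existing(path, candidates):
        '''Return the first candidate filename that exists under path.'''
        for name in candidates:
            if os.path.exists(os.path.join(path, name)):
                return name
        raise IOError('No benchmark found in %s.  Checked for: %s.'
                      % (path, ', '.join(candidates)))

    # e.g. first_existing('tests/h2o',
    #                     ['benchmark.out.id2.inp=in', 'benchmark.out.id1.inp=in'])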
Example #8
    def extract_data(self, input_file, args, verbose=1):
        '''Extract data from output file.

Assume function is executed in self.path.'''
        tp_ptr = self.test_program
        if tp_ptr.data_tag:
            # Using internal data extraction function.
            data_files = [
                tp_ptr.select_benchmark_file(self.path, input_file, args),
                util.testcode_filename(FILESTEM['test'], tp_ptr.test_id,
                                       input_file, args),
            ]
            if verbose > 2:
                print('Analysing output using data_tag %s in %s on files %s.' %
                      (tp_ptr.data_tag, self.path, ' and '.join(data_files)))
            outputs = [
                util.extract_tagged_data(tp_ptr.data_tag, dfile)
                for dfile in data_files
            ]
        else:
            # Using external data extraction script.
            # Get extraction commands.
            extract_cmds = tp_ptr.extract_cmd(self.path, input_file, args)

            # Extract data.
            outputs = []
            for cmd in extract_cmds:
                try:
                    if verbose > 2:
                        print('Analysing output using %s in %s.' %
                              (cmd, self.path))
                    # Samuel Ponce: Popen.wait() can deadlock if the output is
                    # too large to fit in the pipe buffer.  See the documented
                    # issue in:
                    # https://docs.python.org/2/library/subprocess.html#subprocess.Popen.returncode
                    #
                    # Previous code that created the deadlock:
                    #extract_popen = subprocess.Popen(cmd, shell=True,
                    #        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    #extract_popen.wait()
                    #
                    # New code (this might not be the best but it works for me):
                    extract_popen = subprocess.Popen(cmd,
                                                     bufsize=1,
                                                     shell=True,
                                                     stdin=open(os.devnull),
                                                     stdout=subprocess.PIPE,
                                                     stderr=subprocess.PIPE)

                    lines = []
                    # The pipe yields bytes under Python 3, so use a bytes
                    # sentinel: readline returns b'' at EOF.
                    for line in iter(extract_popen.stdout.readline, b''):
                        lines.append(line)

                except OSError:
                    # slightly odd syntax in order to be compatible with python
                    # 2.5 and python 2.6/3
                    err = 'Analysing output failed: %s' % (sys.exc_info()[1], )
                    raise exceptions.AnalysisError(err)
                # Convert data string from extract command to dictionary format.

                # SP: Because of the above change, the return code can no
                # longer be checked:
                #if extract_popen.returncode != 0:
                #    err = extract_popen.communicate()[1].decode('utf-8')
                #    err = 'Analysing output failed: %s' % (err)
                #    raise exceptions.AnalysisError(err)
                #data_string = extract_popen.communicate()[0].decode('utf-8')
                data_string = b''.join(lines).decode('utf-8')

                if self.test_program.extract_fmt == 'table':
                    outputs.append(util.dict_table_string(data_string))
                elif self.test_program.extract_fmt == 'yaml':
                    outputs.append({})
                    # convert values to be in a tuple so the format matches
                    # that from dict_table_string.
                    # ensure all keys are strings so they can be sorted
                    # (different data types cause problems!)
                    for (key, val) in yaml.safe_load(data_string).items():
                        if isinstance(val, list):
                            outputs[-1][str(key)] = tuple(val)
                        else:
                            outputs[-1][str(key)] = tuple((val, ))

        return tuple(outputs)
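
The readline loop above sidesteps the `wait()` deadlock, but the standard library's own answer is `Popen.communicate()`, which drains both pipes for you and still lets the return code be checked. A minimal sketch with an arbitrary shell command:

    import subprocess

    proc = subprocess.Popen('seq 1 100000', shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # communicate() reads both streams to EOF, then reaps the process.
    (stdout, stderr) = proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError(stderr.decode('utf-8'))
    data_string = stdout.decode('utf-8')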
Example #9
def parse_jobconfig(config_file, user_options, test_programs, settings=None):
    '''Parse the test configurations from the jobconfig file.

config_file: location of the jobconfig file, either relative or absolute.'''

    if not os.path.exists(config_file):
        raise exceptions.TestCodeError(
            'Job configuration file %s does not exist.' % (config_file))

    # paths to the test directories can be specified relative to the config
    # file.
    config_directory = os.path.dirname(os.path.abspath(config_file))

    jobconfig = compat.configparser.RawConfigParser()
    jobconfig.optionxform = str # Case sensitive file.
    jobconfig.read(config_file)

    # Alter config file with additional settings provided.
    if settings:
        for (section_key, section) in settings.items():
            for (option_key, value) in section.items():
                jobconfig.set(section_key, option_key, value)

    # Parse job categories.
    # Just store as list of test names for now.
    if jobconfig.has_section('categories'):
        test_categories = dict(jobconfig.items('categories'))
        for (key, val) in test_categories.items():
            test_categories[key] = val.split()
        jobconfig.remove_section('categories')
    else:
        test_categories = {}

    # Parse individual sections for tests.
    # Note that sections/paths may contain globs and hence correspond to
    # multiple tests.
    # First, find out the tests each section corresponds to.
    test_sections = []
    for section in jobconfig.sections():
        # Expand any globs in the path/section name and create individual Test
        # objects for each one.
        if jobconfig.has_option(section, 'path'):
            path = os.path.join(config_directory,
                                jobconfig.get(section, 'path'))
            jobconfig.remove_option(section, 'path')
            globbed_tests = [(section, test_path)
                             for test_path in glob.glob(path)]
        else:
            path = os.path.join(config_directory, section)
            globbed_tests = [(test_path, test_path)
                             for test_path in glob.glob(path)]
        test_sections.append((section, globbed_tests))
    test_sections.sort(key=lambda sec_info: len(sec_info[1]), reverse=True)
    test_info = {}
    for (section, globbed_tests) in test_sections:
        test_dict = {}
        # test program
        if jobconfig.has_option(section, 'program'):
            test_program = test_programs[jobconfig.get(section, 'program')]
        else:
            test_program = test_programs[user_options['default_program']]
        # tolerances
        if jobconfig.has_option(section, 'tolerance'):
            test_dict['tolerances'] = {}
            for item in eval_nested_tuple(jobconfig.get(section, 'tolerance')):
                (name, tol) = parse_tolerance_tuple(item)
                test_dict['tolerances'][name] = tol
            jobconfig.remove_option(section, 'tolerance')
            if None in test_dict['tolerances']:
                test_dict['default_tolerance'] = test_dict['tolerances'][None]
        # inputs and arguments
        if jobconfig.has_option(section, 'inputs_args'):
            # format: (input, arg), (input, arg)
            test_dict['inputs_args'] = (
                    eval_nested_tuple(jobconfig.get(section, 'inputs_args')))
            jobconfig.remove_option(section, 'inputs_args')
        if jobconfig.has_option(section, 'run_concurrent'):
            test_dict['run_concurrent'] = \
                    jobconfig.getboolean(section, 'run_concurrent')
            jobconfig.remove_option(section, 'run_concurrent')
        # Other options.
        for option in jobconfig.options(section):
            test_dict[option] = jobconfig.get(section, option)
        for key in ('nprocs', 'max_nprocs', 'min_nprocs'):
            if key in test_dict:
                test_dict[key] = int(test_dict[key])
        for (name, path) in globbed_tests:
            # Need to take care with tolerances: want to *update* existing
            # tolerance dictionary rather than overwrite it.
            # This means we can't just use test_dict to update the relevant
            # dictionary in test_info.
            tol = None
            if name in test_info:
                # Just update existing info.
                test = test_info[name]
                if 'tolerances' in test_dict:
                    test[2]['tolerances'].update(test_dict['tolerances'])
                    tol = test_dict.pop('tolerances')
                test[0] = test_program
                test[1] = path
                test[2].update(test_dict)
                if tol:
                    test_dict['tolerances'] = tol
            else:
                # Create new test_info value.
                # Merge with default values.
                # Default test options.
                default_test = test_program.default_test_settings
                test = dict(
                        inputs_args=default_test.inputs_args,
                        output=default_test.output,
                        default_tolerance=default_test.default_tolerance,
                        tolerances=copy.deepcopy(default_test.tolerances),
                        nprocs=default_test.nprocs,
                        min_nprocs=default_test.min_nprocs,
                        max_nprocs=default_test.max_nprocs,
                        run_concurrent=default_test.run_concurrent,
                    )
                if 'tolerances' in test_dict:
                    test['tolerances'].update(test_dict['tolerances'])
                    tol = test_dict.pop('tolerances')
                test.update(test_dict)
                # restore tolerances for next test in the glob.
                if tol:
                    test_dict['tolerances'] = tol
                test_info[name] = [test_program, path, copy.deepcopy(test)]

    # Now create the tests (after finding out what the input files are).
    tests = []
    for (name, (test_program, path, test_dict)) in test_info.items():
        old_dir = os.getcwd()
        os.chdir(path)
        # Expand any globs in the input files.
        inputs_args = []
        for input_arg in test_dict['inputs_args']:
            # Be a little forgiving for the input_args config option.
            # If we're given ('input'), then clearly the user meant for the
            # args option to be empty.  However, literal_eval returns
            # a string rather than a tuple in such cases, which causes
            # problems.
            if isinstance(input_arg, str):
                inp = input_arg
                arg = ''
            elif len(input_arg) == 2:
                inp = input_arg[0]
                arg = input_arg[1]
            else:
                inp = input_arg[0]
                arg = ''
            if inp:
                # the test, error and benchmark filenames contain the input
                # filename, so we need to filter them out.
                inp_files = sorted(glob.glob(inp))
                if not inp_files:
                    err = 'Cannot find input file %s in %s.' % (inp, path)
                    raise exceptions.TestCodeError(err)
                for inp_file in inp_files:
                    # We use a glob for the input argument to avoid the
                    # case where the argument is empty and hence a pattern
                    # such as *.inp also matches files like
                    # test.out.test_id.inp=x.inp and hence considering
                    # previous output files to actually be an input file in
                    # their own right.
                    test_files = [
                        util.testcode_filename(stem[1], '*', '*', arg)
                        for stem in testcode2._FILESTEM_TUPLE
                    ]
                    testcode_files = []
                    for tc_file in test_files:
                        testcode_files.extend(glob.glob(tc_file))
                    if inp_file not in testcode_files:
                        inputs_args.append((inp_file, arg))
            else:
                inputs_args.append((inp, arg))
        test_dict['inputs_args'] = tuple(inputs_args)
        os.chdir(old_dir)
        # Create test.
        if test_dict['run_concurrent']:
            for input_arg in test_dict['inputs_args']:
                test_dict['inputs_args'] = (input_arg,)
                tests.append(testcode2.Test(name, test_program, path,
                                            **test_dict))
        else:
            tests.append(testcode2.Test(name, test_program, path, **test_dict))

    return (tests, test_categories)
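
For context, the jobconfig file being parsed is a case-sensitive INI file: an optional [categories] section naming groups of tests, plus one section per test. A minimal sketch of the format, with invented section names and option values, parsed the same way the function does:

    import configparser

    jobconfig_text = '''
    [categories]
    quick = h2o

    [h2o]
    inputs_args = ('h2o.inp', '')
    tolerance = (1e-6, 'energy')
    nprocs = 4
    '''
    cfg = configparser.RawConfigParser()
    cfg.optionxform = str         # case-sensitive, as in parse_jobconfig
    cfg.read_string(jobconfig_text)
    print(dict(cfg.items('h2o')))
    # {'inputs_args': "('h2o.inp', '')", 'tolerance': "(1e-6, 'energy')",
    #  'nprocs': '4'}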
Example #10
    def run_test(self, verbose=1, cluster_queue=None, rundir=None):
        '''Run all jobs in test.'''

        try:
            # Construct tests.
            test_cmds = []
            test_files = []
            for (test_input, test_arg) in self.inputs_args:
                if (test_input and
                        not os.path.exists(os.path.join(self.path, test_input))):
                    err = 'Input file does not exist: %s' % (test_input,)
                    raise exceptions.RunError(err)
                test_cmds.append(self.test_program.run_cmd(test_input, test_arg,
                                                           self.nprocs))
                test_files.append(util.testcode_filename(FILESTEM['test'],
                        self.test_program.test_id, test_input, test_arg))

            # Move files matching output pattern out of the way.
            self.move_old_output_files(verbose)

            # Run tests one-at-a-time locally or submit job in single submit
            # file to a queueing system.
            if cluster_queue:
                if self.output:
                    for (ind, test) in enumerate(test_cmds):
                        # Don't quote self.output if it contains any wildcards
                        # (assume the user set it up correctly!)
                        out = self.output
                        if not compat.compat_any(wild in self.output for wild in
                                ['*', '?', '[', '{']):
                            out = pipes.quote(self.output)
                        test_cmds[ind] = '%s; mv %s %s' % (test_cmds[ind],
                                out, pipes.quote(test_files[ind]))
                test_cmds = ['\n'.join(test_cmds)]
            for (ind, test) in enumerate(test_cmds):
                job = self.start_job(test, cluster_queue, verbose)
                job.wait()
                # Analyse tests as they finish.
                if cluster_queue:
                    # Did all of them at once.
                    for (test_input, test_arg) in self.inputs_args:
                        self.verify_job(test_input, test_arg, verbose, rundir)
                else:
                    # Did one job at a time.
                    (test_input, test_arg) = self.inputs_args[ind]
                    err = []
                    if self.output:
                        try:
                            self.move_output_to_test_output(test_files[ind])
                        except exceptions.RunError:
                            err.append(sys.exc_info()[1])
                    status = validation.Status()
                    if job.returncode != 0:
                        err.insert(0, 'Error running job.  Return code: %i'
                                        % job.returncode)
                        (status, msg) = self.skip_job(test_input, test_arg,
                                                      verbose)
                    if status.skipped():
                        self._update_status(status, (test_input, test_arg))
                        if verbose > 0 and verbose < 3:
                            sys.stdout.write(util.info_line(
                                self.path, test_input, test_arg, rundir))
                        status.print_status(msg, verbose)
                    elif err:
                        # re-raise first error we hit.
                        raise exceptions.RunError(err[0])
                    else:
                        self.verify_job(test_input, test_arg, verbose, rundir)
        except exceptions.RunError:
            err = sys.exc_info()[1]
            if verbose > 2:
                err = 'Test(s) in %s failed.\n%s' % (self.path, err)
            status = validation.Status([False])
            self._update_status(status, (test_input, test_arg))
            if verbose > 0 and verbose < 3:
                info_line = util.info_line(self.path, test_input, test_arg, rundir)
                sys.stdout.write(info_line)
            status.print_status(err, verbose)
            # Shouldn't run remaining tests after such a catastrophic failure.
            # Mark all remaining tests as skipped so the user knows that they
            # weren't run.
            err = 'Previous test in %s caused a system failure.' % (self.path)
            status = validation.Status(name='skipped')
            for ((test_input, test_arg), stat) in self.status.items():
                if not self.status[(test_input, test_arg)]:
                    self._update_status(status, (test_input, test_arg))
                    if verbose > 2:
                        cmd = self.test_program.run_cmd(test_input, test_arg,
                                                        self.nprocs)
                        print('Test using %s in %s' % (cmd, self.path))
                    elif verbose > 0:
                        info_line = util.info_line(self.path, test_input,
                                                   test_arg, rundir)
                        sys.stdout.write(info_line)
                    status.print_status(err, verbose)
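
In the cluster_queue branch above, each run command is suffixed with a mv that captures its output file, and all runs are joined into a single submit script. A minimal sketch of that batching, with invented command and file names (shlex.quote standing in for the deprecated pipes.quote):

    import shlex

    test_cmds = ['./my_code.x case1.inp', './my_code.x case2.inp']
    test_files = ['case1.test.out', 'case2.test.out']
    output = 'out'
    # One "run; capture output" line per test, joined into one script.
    batched = ['%s; mv %s %s' % (cmd, shlex.quote(output), shlex.quote(tf))
               for (cmd, tf) in zip(test_cmds, test_files)]
    submit_script = '\n'.join(batched)
    print(submit_script)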