Example #1
    def _verify_job(self, input_file, args, verbose=1, rundir=None):
        '''Check job against benchmark.

Assume function is executed in self.path.

IMPORTANT: use self.verify_job rather than self._verify_job if using multiple
threads.

This method is decorated during initialisation to give verify_job, which
acquires the directory lock and enters self.path first.'''
        # Make sure status and msg are defined even if skip_job raises, and
        # keep any skip message it returns.
        status = validation.Status()
        msg = ''
        try:
            (status, msg) = self.skip_job(input_file, args, verbose)
        except exceptions.AnalysisError:
            if verbose > 2:
                err = sys.exc_info()[1]
                print(err)

        try:
            if self.test_program.verify and not status.skipped():
                (status, msg) = self.verify_job_external(input_file, args,
                                                         verbose)
            elif not status.skipped():
                (bench_out, test_out) = self.extract_data(input_file, args,
                                                          verbose)
                (comparable, status, msg) = validation.compare_data(bench_out,
                        test_out, self.default_tolerance, self.tolerances)
                if verbose > 2:
                    # Include data tables in output.
                    if comparable:
                        # Combine test and benchmark dictionaries.
                        data_table = util.pretty_print_table(
                                ['benchmark', 'test'],
                                [bench_out, test_out])
                    else:
                        # Print dictionaries separately--couldn't even compare
                        # them!
                        data_table = '\n'.join((
                            util.pretty_print_table(['benchmark'], [bench_out]),
                            util.pretty_print_table(['test     '], [test_out])))
                    if msg.strip():
                        # join data table with error message from
                        # validation.compare_data.
                        msg = '\n'.join((msg, data_table))
                    else:
                        msg = data_table
        except exceptions.AnalysisError:
            if msg.strip():
                msg = '%s\n%s' % (msg, sys.exc_info()[1])
            else:
                msg = sys.exc_info()[1]
            status = validation.Status([False])

        self._update_status(status, (input_file, args))
        if verbose > 0 and verbose < 3:
            info_line = util.info_line(self.path, input_file, args, rundir)
            sys.stdout.write(info_line)
        status.print_status(msg, verbose)

        return (status, msg)
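
A brief aside on the docstring above: it refers to a verify_job wrapper that
takes a directory lock and changes into self.path before calling _verify_job.
The following is only a minimal sketch of how such a wrapper might look,
assuming a module-level threading lock named DIR_LOCK (the name appears in a
comment in Example #2) and an illustrative with_dir_lock helper; it is not the
package's actual implementation.

    import functools
    import os
    import threading

    DIR_LOCK = threading.Lock()

    def with_dir_lock(bound_method, path):
        '''Run bound_method under DIR_LOCK from inside path.'''
        @functools.wraps(bound_method)
        def wrapper(*args, **kwargs):
            with DIR_LOCK:
                old_dir = os.getcwd()
                os.chdir(path)
                try:
                    return bound_method(*args, **kwargs)
                finally:
                    os.chdir(old_dir)
        return wrapper

    # Hypothetical use during initialisation:
    #     self.verify_job = with_dir_lock(self._verify_job, self.path)
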
Example #2
    def _verify_job(self, input_file, args, verbose=1, rundir=None):
        '''Check job against benchmark.

Assume function is executed in self.path.

IMPORTANT: use self.verify_job rather than self._verify_job if using multiple
threads.

This method is decorated during initialisation to give verify_job, which
acquires the directory lock and enters self.path first.'''
        # We already have DIR_LOCK, so use _skip_job instead of skip_job.
        (status, msg) = self._skip_job(input_file, args, verbose)
        try:
            if self.test_program.verify and not status.skipped():
                (status,
                 msg) = self.verify_job_external(input_file, args, verbose)
            elif not status.skipped():
                (bench_out,
                 test_out) = self.extract_data(input_file, args, verbose)
                (comparable, status, msg) = validation.compare_data(
                    bench_out, test_out, self.default_tolerance,
                    self.tolerances, self.test_program.ignore_fields)
                if verbose > 2:
                    # Include data tables in output.
                    if comparable:
                        # Combine test and benchmark dictionaries.
                        data_table = util.pretty_print_table(
                            ['benchmark', 'test'], [bench_out, test_out])
                    else:
                        # Print dictionaries separately--couldn't even compare
                        # them!
                        data_table = '\n'.join(
                            (util.pretty_print_table(['benchmark'],
                                                     [bench_out]),
                             util.pretty_print_table(['test     '],
                                                     [test_out])))
                    if msg.strip():
                        # join data table with error message from
                        # validation.compare_data.
                        msg = '\n'.join((msg, data_table))
                    else:
                        msg = data_table
        except (exceptions.AnalysisError, exceptions.TestCodeError):
            if msg.strip():
                msg = '%s\n%s' % (msg, sys.exc_info()[1])
            else:
                msg = sys.exc_info()[1]
            status = validation.Status([False])

        self._update_status(status, (input_file, args))
        if verbose > 0 and verbose < 3:
            info_line = util.info_line(self.path, input_file, args, rundir)
            sys.stdout.write(info_line)
        status.print_status(msg, verbose)

        return (status, msg)
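
The "IMPORTANT" note in the docstring means verification from several threads
should go through the thread-safe verify_job wrapper rather than _verify_job.
A hypothetical sketch of such a driver (the helper name and the plain
threading.Thread usage are assumptions; only verify_job and inputs_args come
from the code above):

    import threading

    def verify_all(test, verbose=1):
        '''Verify every (input, args) pair of `test` concurrently.'''
        # 'test' stands for an already-constructed test object of the class
        # shown above.
        threads = [threading.Thread(target=test.verify_job,
                                    args=(test_input, test_arg, verbose))
                   for (test_input, test_arg) in test.inputs_args]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
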
Example #3
    def run_test(self, verbose=1, cluster_queue=None, rundir=None):
        '''Run all jobs in test.'''

        try:
            # Construct tests.
            test_cmds = []
            test_files = []
            for (test_input, test_arg) in self.inputs_args:
                if (test_input and not os.path.exists(
                        os.path.join(self.path, test_input))):
                    err = 'Input file does not exist: %s' % (test_input, )
                    raise exceptions.RunError(err)
                test_cmds.append(
                    self.test_program.run_cmd(test_input, test_arg,
                                              self.nprocs))
                test_files.append(
                    util.testcode_filename(FILESTEM['test'],
                                           self.test_program.test_id,
                                           test_input, test_arg))

            # Move files matching output pattern out of the way.
            self.move_old_output_files(verbose)

            # Run tests one-at-a-time locally or submit job in single submit
            # file to a queueing system.
            if cluster_queue:
                if self.output:
                    for (ind, test) in enumerate(test_cmds):
                        # Don't quote self.output if it contains any wildcards
                        # (assume the user set it up correctly!)
                        out = self.output
                        if not compat.compat_any(
                                wild in self.output
                                for wild in ['*', '?', '[', '{']):
                            out = pipes.quote(self.output)
                        test_cmds[ind] = '%s; mv %s %s' % (
                            test_cmds[ind], out, pipes.quote(test_files[ind]))
                test_cmds = ['\n'.join(test_cmds)]
            for (ind, test) in enumerate(test_cmds):
                job = self.start_job(test, cluster_queue, verbose)
                job.wait()
                # Analyse tests as they finish.
                if cluster_queue:
                    # Did all of them at once.
                    for (test_input, test_arg) in self.inputs_args:
                        self.verify_job(test_input, test_arg, verbose, rundir)
                else:
                    # Did one job at a time.
                    (test_input, test_arg) = self.inputs_args[ind]
                    err = []
                    if self.output:
                        try:
                            self.move_output_to_test_output(test_files[ind])
                        except exceptions.RunError:
                            err.append(sys.exc_info()[1])
                    status = validation.Status()
                    if job.returncode != 0:
                        err.insert(
                            0, 'Error running job.  Return code: %i' %
                            job.returncode)
                        (status, msg) = self.skip_job(test_input, test_arg,
                                                      verbose)
                    if status.skipped():
                        self._update_status(status, (test_input, test_arg))
                        if verbose > 0 and verbose < 3:
                            sys.stdout.write(
                                util.info_line(self.path, test_input, test_arg,
                                               rundir))
                        status.print_status(msg, verbose)
                    elif err:
                        # re-raise first error we hit.
                        raise exceptions.RunError(err[0])
                    else:
                        self.verify_job(test_input, test_arg, verbose, rundir)
        except exceptions.RunError:
            err = sys.exc_info()[1]
            if verbose > 2:
                err = 'Test(s) in %s failed.\n%s' % (self.path, err)
            status = validation.Status([False])
            self._update_status(status, (test_input, test_arg))
            if verbose > 0 and verbose < 3:
                info_line = util.info_line(self.path, test_input, test_arg,
                                           rundir)
                sys.stdout.write(info_line)
            status.print_status(err, verbose)
            # Shouldn't run remaining tests after such a catastrophic failure.
            # Mark all remaining tests as skipped so the user knows that they
            # weren't run.
            err = 'Previous test in %s caused a system failure.' % (self.path)
            status = validation.Status(name='skipped')
            for ((test_input, test_arg), stat) in self.status.items():
                if not self.status[(test_input, test_arg)]:
                    self._update_status(status, (test_input, test_arg))
                    if verbose > 2:
                        cmd = self.test_program.run_cmd(
                            test_input, test_arg, self.nprocs)
                        print('Test using %s in %s' % (cmd, self.path))
                    elif verbose > 0:
                        info_line = util.info_line(self.path, test_input,
                                                   test_arg, rundir)
                        sys.stdout.write(info_line)
                    status.print_status(err, verbose)
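
The cluster-queue branch above leaves self.output unquoted whenever it
contains shell wildcards, because a quoted pattern would no longer glob when
the generated "mv" line runs. A small standalone illustration of that decision
(the file names here are made up, and the built-in any stands in for the
compat.compat_any used in the real code):

    import pipes

    output_pattern = '*.out'  # made-up output pattern
    if any(wild in output_pattern for wild in ['*', '?', '[', '{']):
        # Leave wildcards unquoted so the shell expands them.
        out = output_pattern
    else:
        # Quote plain file names to survive spaces and metacharacters.
        out = pipes.quote(output_pattern)
    print('mv %s %s' % (out, pipes.quote('test.out.t1')))
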
Example #4
    def run_test(self, verbose=1, cluster_queue=None, rundir=None):
        '''Run all jobs in test.'''

        try:
            # Construct tests.
            test_cmds = []
            test_files = []
            for (test_input, test_arg) in self.inputs_args:
                if (test_input and
                        not os.path.exists(
                            os.path.join(self.path, test_input))):
                    err = 'Input file does not exist: %s' % (test_input,)
                    raise exceptions.RunError(err)
                test_cmds.append(self.test_program.run_cmd(test_input, test_arg,
                                                           self.nprocs))
                test_files.append(util.testcode_filename(FILESTEM['test'],
                        self.test_program.test_id, test_input, test_arg))

            # Move files matching output pattern out of the way.
            self.move_old_output_files(verbose)

            # Run tests one-at-a-time locally or submit job in single submit
            # file to a queueing system.
            if cluster_queue:
                if self.output:
                    for (ind, test) in enumerate(test_cmds):
                        # Don't quote self.output if it contains any wildcards
                        # (assume the user set it up correctly!)
                        out = self.output
                        if not compat.compat_any(wild in self.output for wild in
                                ['*', '?', '[', '{']):
                            out = pipes.quote(self.output)
                        test_cmds[ind] = '%s; mv %s %s' % (test_cmds[ind],
                                out, pipes.quote(test_files[ind]))
                test_cmds = ['\n'.join(test_cmds)]
            for (ind, test) in enumerate(test_cmds):
                job = self.start_job(test, cluster_queue, verbose)
                job.wait()
                # Analyse tests as they finish.
                if cluster_queue:
                    # Did all of them at once.
                    for (test_input, test_arg) in self.inputs_args:
                        self.verify_job(test_input, test_arg, verbose, rundir)
                else:
                    # Did one job at a time.
                    (test_input, test_arg) = self.inputs_args[ind]
                    err = []
                    if self.output:
                        try:
                            self.move_output_to_test_output(test_files[ind])
                        except exceptions.RunError:
                            err.append(sys.exc_info()[1])
                    status = validation.Status()
                    if job.returncode != 0:
                        err.insert(0, 'Error running job.  Return code: %i'
                                        % job.returncode)
                        (status, msg) = self.skip_job(test_input, test_arg,
                                                      verbose)
                    if status.skipped():
                        self._update_status(status, (test_input, test_arg))
                        if verbose > 0 and verbose < 3:
                            sys.stdout.write(
                                    util.info_line(self.path,
                                                   test_input, test_arg, rundir)
                                            )
                        status.print_status(msg, verbose)
                    elif err:
                        # re-raise first error we hit.
                        raise exceptions.RunError(err[0])
                    else:
                        self.verify_job(test_input, test_arg, verbose, rundir)
        except exceptions.RunError:
            err = sys.exc_info()[1]
            if verbose > 2:
                err = 'Test(s) in %s failed.\n%s' % (self.path, err)
            status = validation.Status([False])
            self._update_status(status, (test_input, test_arg))
            if verbose > 0 and verbose < 3:
                info_line = util.info_line(self.path, test_input, test_arg, rundir)
                sys.stdout.write(info_line)
            status.print_status(err, verbose)
            # Shouldn't run remaining tests after such a catastrophic failure.
            # Mark all remaining tests as skipped so the user knows that they
            # weren't run.
            err = 'Previous test in %s caused a system failure.' % (self.path)
            status = validation.Status(name='skipped')
            for ((test_input, test_arg), stat) in self.status.items():
                if not self.status[(test_input, test_arg)]:
                    self._update_status(status, (test_input, test_arg))
                    if verbose > 2:
                        cmd = self.test_program.run_cmd(test_input, test_arg,
                                                        self.nprocs)
                        print('Test using %s in %s' % (cmd, self.path))
                    elif verbose > 0:
                        info_line = util.info_line(self.path, test_input,
                                                   test_arg, rundir)
                        sys.stdout.write(info_line)
                    status.print_status(err, verbose)
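
Finally, a hypothetical driver for either version of run_test (the tests list
and the queue argument are assumptions; only the method signature comes from
the code above):

    def run_all(tests, queue=None):
        '''Run every test, locally or via the named cluster queue.'''
        # 'tests' stands for an assumed, already-constructed list of test
        # objects of the class shown above.
        for test in tests:
            test.run_test(verbose=1, cluster_queue=queue)
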