Example #1
def main():
    _main = Main()
    _main.parse_args()
    try:
        generate_tags(_main.args[0])
    except IndexError:
        _main.error("where is makefile.setup ?")
Example #2
def __parse_options():
    """Parse command lines options"""
    m = Main(add_targets_options=True)
    add_mainloop_options(m)
    add_run_test_options(m)
    m.add_option("--diffs",
                 dest="view_diffs",
                 action="store_true",
                 default=False,
                 help="Print .diff content")
    m.add_option("--old-result-dir",
                 type="string",
                 default=None,
                 help="Old result dir")
    m.parse_args()

    if m.args:
        m.options.run_test = m.args[0]
        # The user wants to run only one test
        print "Running only test '%s'" % m.options.run_test
    else:
        m.options.run_test = ""

    if m.options.discs:
        m.options.discs = m.options.discs.split(',')

    return m.options
Example #3
def main():
    """Run the testsuite"""

    m = Main()
    add_mainloop_options(m, extended_options=True)
    add_run_test_options(m)
    m.add_option("--diffs",
                 dest="view_diffs",
                 action="store_true",
                 default=False,
                 help="show diffs on stdout")
    m.parse_args()

    # Various files needed or created by the testsuite.
    # This creates:
    #   the output directory ('out' by default)
    #   the report file
    #   the results file

    setup_result_dir(m.options)

    if m.args:
        test_list = [t.strip('/') for t in m.args]
    else:
        test_list = sorted(glob('tests/*'))

    env = Env()

    # add support module path
    python_lib = os.path.join(os.getcwd(), 'lib', 'python')
    Env().add_search_path("PYTHONPATH", python_lib)

    env.add_search_path('PYTHONPATH', os.getcwd())
    discs = [env.target.platform]

    if m.options.discs:
        discs += m.options.discs.split(',')

    collect_result = generate_collect_result(m.options.output_dir,
                                             m.options.results_file,
                                             m.options.view_diffs)

    run_testcase = generate_run_testcase(python_lib + '/run-test', discs,
                                         m.options)

    MainLoop(test_list, run_testcase, collect_result, m.options.mainloop_jobs)
    # Generate the report file
    ReportDiff(m.options.output_dir,
               m.options.old_output_dir).txt_image(m.options.report_file)
Example #4
def __parse_options():
    """Parse command lines options"""
    m = Main(add_targets_options=True)
    add_mainloop_options(m)
    add_run_test_options(m)
    m.add_option("--diffs", dest="view_diffs", action="store_true",
                 default=False, help="Print .diff content")
    m.add_option("--old-result-dir", type="string", default=None,
                 help="Old result dir")
    m.parse_args()

    if m.args:
        m.options.run_test = m.args[0]
        # The user wants to run only one test
        print "Running only test '%s'" % m.options.run_test
    else:
        m.options.run_test = ""

    if m.options.discs:
        m.options.discs = m.options.discs.split(',')

    return m.options
Example #5
def main():
    """Run the testsuite.
    """
    m = Main()
    add_mainloop_options(m, extended_options=True)
    add_run_test_options(m)
    m.add_option("--diffs",
                 dest="view_diffs",
                 action="store_true",
                 default=False,
                 help="show diffs on stdout")
    m.parse_args()

    # Create a tmp directory for the entire testsuite, to make sure
    # that, should the git hooks leak any file/directories, we can
    # (1) detect them, and (2) delete them.
    #
    # This requires some extra work to make sure that the scripts
    # being tested do actually use them, but this needs to be done
    # by each testcase, because we want each testcase to have its
    # own tmp directory (allowing for concurrency).  We pass that
    # information to the testcase through the GIT_HOOKS_TESTSUITE_TMP
    # environment variable.
    m.options.tmp = mkdtemp('', 'git-hooks-TS-', m.options.tmp)
    os.environ['GIT_HOOKS_TESTSUITE_TMP'] = m.options.tmp

    try:
        testcases = get_testcases(m.args)
        setup_result_dir(m.options)

        # We do not need discriminants in this testsuite at the moment.
        discs = None

        metrics = {}
        collect_result = generate_collect_result(metrics=metrics,
                                                 options=m.options)
        run_testcase = generate_run_testcase('bin/run-testcase', discs,
                                             m.options)

        MainLoop(testcases, run_testcase, collect_result,
                 m.options.mainloop_jobs)
        print_testsuite_results_summary(metrics)
    finally:
        rm(m.options.tmp, recursive=True)
Example #6
def __parse_options():
    """Parse command lines options"""
    m = Main(add_targets_options=False)
    add_mainloop_options(m, extended_options=True)
    add_run_test_options(m)
    m.add_option("--diffs",
                 dest="view_diffs",
                 action="store_true",
                 default=False,
                 help="show diffs on stdout")
    m.parse_args()

    if m.args:
        m.options.run_test = m.args[0]
        print "Running only test '%s'" % m.options.run_test
    else:
        m.options.run_test = ""

    if m.options.discs:
        m.options.discs = m.options.discs.split(',')

    return m.options
Example #7
def main():
    """Run the testsuite"""

    m = Main()
    add_mainloop_options(m, extended_options=True)
    add_run_test_options(m)
    m.add_option("--diffs",
                 dest="view_diffs",
                 action="store_true",
                 default=False,
                 help="show diffs on stdout")
    m.parse_args()

    # Various files needed or created by the testsuite.
    # This creates:
    #   the output directory ('out' by default)
    #   the report file
    #   the results file

    setup_result_dir(m.options)

    if m.args:
        test_list = [t.strip('/') for t in m.args]
    else:
        test_list = sorted(glob('tests/*'))

    env = Env()

    # add support module path
    python_lib = os.path.join(os.getcwd(), 'lib', 'python')
    Env().add_search_path("PYTHONPATH", python_lib)

    env.add_search_path('PYTHONPATH', os.getcwd())
    discs = [env.target.platform]

    if m.options.discs:
        discs += m.options.discs.split(',')

    test_metrics = {'total': len(test_list), 'uok': 0, 'invalid': 0}

    # Generate a standard 'collect_result' function...
    generated_collect_result = generate_collect_result(
        result_dir=m.options.output_dir,
        results_file=m.options.results_file,
        output_diff=m.options.view_diffs,
        metrics=test_metrics)

    # ... and then wrap that generated 'collect_result' function in something
    # that will also accumulate 'UOK' test results and failed tests
    def collect_test_metrics(name, process, _job_info):
        generated_collect_result(name, process, _job_info)
        test_name = os.path.basename(name)
        test_result = split_file(m.options.output_dir + '/' + test_name +
                                 '.result',
                                 ignore_errors=True)
        if test_result:
            test_status = test_result[0].split(':')[0]
            if test_status == 'UOK':
                test_metrics['uok'] += 1
            elif test_status == 'INVALID_TEST':
                test_metrics['invalid'] += 1

    run_testcase = generate_run_testcase('run-test', discs, m.options)

    MainLoop(test_list, run_testcase, collect_test_metrics,
             m.options.mainloop_jobs)

    print "Summary: Ran %(run)s/%(total)s tests, with %(failed)s failed, %(crashed)s crashed, %(uok)s unexpectedly passed, %(invalid)s invalid." % test_metrics

    # Generate the report file
    ReportDiff(m.options.output_dir,
               m.options.old_output_dir).txt_image(m.options.report_file)

    if (test_metrics['failed'] > 0 or test_metrics['crashed'] > 0
            or test_metrics['uok'] > 0 or test_metrics['invalid'] > 0):
        sys.exit(1)
Example #8
def run_testsuite():
    """Main: parse command line and run the testsuite"""
    main = Main(formatter='%(message)s', add_targets_options=True)
    add_mainloop_options(main, extended_options=True)
    add_run_test_options(main)
    main.add_option("--with-Z999",
                    dest="with_Z999",
                    action="store_true",
                    default=False,
                    help="Add a test that always fail")
    main.add_option("--view-diffs",
                    dest="view_diffs",
                    action="store_true",
                    default=False,
                    help="show diffs on stdout")
    main.add_option("--diffs",
                    dest="view_diffs",
                    action="store_true",
                    default=False,
                    help="Alias for --view-diffs")
    main.add_option("--with-gprof",
                    dest="with_gprof",
                    action="store_true",
                    default=False,
                    help="Generate profiling reports")
    main.add_option("--with-gdb",
                    dest="with_gdb",
                    action="store_true",
                    default=False,
                    help="Run with gdb")
    main.add_option("--with-valgrind",
                    dest="with_valgrind",
                    action="store_true",
                    default=False,
                    help="Run with valgrind")
    main.add_option("--old-result-dir", type="string", help="Old result dir")
    main.add_option("--from-build-dir",
                    dest="from_build_dir",
                    action="store_true",
                    default=False,
                    help="Run testsuite from local build (in repository)")
    main.add_option('--retry-when-errors-lower-than',
                    dest='retry_threshold',
                    metavar="MAX_FAILED",
                    default=0,
                    type=int,
                    help="Retry the test that have failed if the number of "
                    "errors if lower than MAX_FAILED")
    main.parse_args()

    run = Runner(main.options)
    run.start(main.args, show_diffs=main.options.view_diffs)
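
The Runner class is defined elsewhere in that project; the two calls above only imply its interface. A hypothetical minimal stand-in, for illustration only:

class Runner(object):
    """Hypothetical skeleton matching the interface run_testsuite() expects."""

    def __init__(self, options):
        # Keep the parsed options for start() to consult.
        self.options = options

    def start(self, tests, show_diffs=False):
        # A real implementation would drive the testsuite here, honoring
        # options such as with_gdb, with_valgrind and retry_threshold.
        raise NotImplementedError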
Example #9
def __parse_options():
    """Parse command lines options"""
    m = Main(add_targets_options=True)
    add_mainloop_options(m, extended_options=True)
    add_run_test_options(m)
    m.add_option("--benchmarks",
                 dest="benchmarks",
                 action="store_true",
                 default=False,
                 help="collect benchmarks")
    m.add_option("--debug",
                 dest="debug",
                 action="store_true",
                 default=False,
                 help="output debugging information")
    m.add_option("--diffs",
                 dest="view_diffs",
                 action="store_true",
                 default=False,
                 help="show diffs on stdout")
    m.add_option("--exact",
                 dest="exact_name",
                 action="store_true",
                 default=False,
                 help="provide exact name of test (not regexp)")
    m.add_option("--testlist",
                 dest="test_list",
                 action="store",
                 type="string",
                 help="provide text file with one test per line to be run")
    m.add_option("--pattern",
                 dest="pattern",
                 action="store",
                 type="string",
                 help="only run tests whose ada files contain this pattern")
    m.add_option("--inverse-prover",
                 dest="inverse_prover",
                 action="store_true",
                 default=False,
                 help="inverse order of default provers")
    m.add_option("--vc-timeout",
                 dest="vc_timeout",
                 action="store",
                 type="int",
                 help="set timeout for prover")
    m.add_option("--cache",
                 dest="cache",
                 action="store_true",
                 default=False,
                 help="use memcached to speed up testsuite")
    m.parse_args()

    if m.args:
        m.options.run_test = m.args[0]
        print "Running only test '%s'" % m.options.run_test
    else:
        m.options.run_test = ""

    if m.options.discs:
        m.options.discs = m.options.discs.split(',')

    return m.options
Example #10
class Testsuite(object):
    """The GNAThub testsuite."""

    # Status that report an error
    ERRORS = ('DIFF', 'CRASH')

    def __init__(self):
        self.duration = 0
        self.summary = defaultdict(lambda: 0)

        self.formatter = None
        self.testcases = None

        self.testcase_runner = None

        self.env = Env()
        self.discs = [self.env.target.platform]

        self.main = Main()
        add_mainloop_options(self.main, extended_options=True)
        add_run_test_options(self.main)

        self.main.add_option('--with-diff', action='store_true', default=False,
                             help='show diffs on stdout')

        self.main.add_option('--colorize', action='store_true',
                             default=False, help=argparse.SUPPRESS)

    @staticmethod
    def find_testcases(directory):
        """Find all testcases in the given directory."""

        return set(find(directory, pattern='test.py') +
                   find(directory, pattern='test.sh'))

    @staticmethod
    def compute_testcases_list(args):
        """Return the list of testcases to execute.

        PARAMETERS
            args: the testsuite positional command-line arguments

        RETURNS
            the list of testcases
        """

        tests = None

        if args:
            tests = [os.path.relpath(os.path.join(ORIGIN, test), BASEDIR)
                     for test in args]
        else:
            basedir = os.path.join(BASEDIR, 'tests')
            tests = [os.path.relpath(os.path.dirname(p), BASEDIR)
                     for p in Testsuite.find_testcases(basedir)]

        return [TestEncoder.encode(path) for path in tests]

    def parse_command_line(self):
        """Handle command-line parsing and internal configuration."""

        self.main.parse_args()

        self.formatter = get_formatter_by_name(
            'terminal256' if self.main.options.colorize else 'null',
            style=GNAThubOutputStyle, encoding='utf-8')

        self.testcases = Testsuite.compute_testcases_list(self.main.args)
        self.testcases = sorted(self.testcases, key=lambda s: s.lower())

        if self.main.options.discs:
            self.discs.extend(self.main.options.discs.split(','))

        setup_result_dir(self.main.options)

    def execute(self):
        """Run the testsuite and execute testcases."""

        # Add the support directory in the PYTHONPATH so that modules are
        # accessible from each test case.
        Env().add_search_path('PYTHONPATH', os.path.dirname(const.basedir))

        self.parse_command_line()

        self.testcase_runner = generate_run_testcase(
            os.path.join(BASEDIR, 'run-test'),
            self.discs, self.main.options)

        MainLoop(self.testcases,
                 self.testcase_runner,
                 self.collect_result,
                 self.main.options.mainloop_jobs)

        # Generate the report file
        diff = ReportDiff(
            self.main.options.output_dir,
            self.main.options.old_output_dir
        )
        diff.txt_image(self.main.options.report_file)
        self.log(self.format_testsuite_summary())

    def log(self, tokens, stream=sys.stdout):
        """Log the input token stream with the standard Python logging
        mecanism.

        PARAMETERS
            log_fn: the logging function to use
            tokens: the input tokens stream
        """

        assert self.formatter is not None, 'Internal error'
        print >> stream, pygments.format(tokens, self.formatter)
        stream.flush()

    def collect_result(self, name, process, job_info):
        """Custom collect_result function."""

        def resource(ext):
            """Returns the path to the testcase resource with the given ext."""
            return os.path.join(
                self.main.options.output_dir, '%s.%s' % (name, ext))

        # Fetch the testcase results
        raw_result = split_file(resource('result'), ignore_errors=True)

        if raw_result:
            status, message = raw_result[0].split(':', 1)
        else:
            status, message = 'CRASH', 'testsuite internal error (no results)'

        if os.path.isfile(resource('time')):
            with open(resource('time'), 'r') as timing:
                duration = float(timing.read())
        else:
            duration = 0

        result = {
            'name': name,
            'status': status,
            'message': message,
            'duration': duration
        }

        # Increment the status count
        self.summary[status] += 1
        self.duration += duration

        # Store the testcase result in the results file
        echo_to_file(self.main.options.results_file,
                     '%(name)s:%(status)s: %(message)s\n' % result,
                     append=True)

        # Display the testcase result
        self.log(
            Testsuite.format_testcase_result(result),
            sys.stderr if status in Testsuite.ERRORS else sys.stdout)

        # Display the testcase diff if requested
        out_file = resource('out')
        diff_file = resource('diff')

        if status in Testsuite.ERRORS and self.main.options.with_diff:
            if os.path.isfile(diff_file):
                with open(diff_file, 'r') as diff:
                    self.log(
                        Testsuite.format_testcase_diff(diff.read().strip()),
                        stream=sys.stderr)
            elif os.path.isfile(out_file):
                with open(out_file, 'r') as out:
                    self.log(
                        Testsuite.format_testcase_diff(''.join([
                            '+{}\n'.format(line)
                            for line in out.read().strip().splitlines()
                        ])), stream=sys.stderr)

    @staticmethod
    def status_token(status):
        """Return the token to use for the given test case result status.

        RETURNS
            a pygments Token
        """

        return getattr(Token.TestResult, status, Token.Error)

    @staticmethod
    def format_testcase_diff(diff):
        """Format a testcase output diff.

        PARAMETERS
            diff: the diff content

        RETURNS
            a list of pygments' Tokens
        """

        def new_line_token():
            """Generate a new line token."""
            return Token.Whitespace, '\n'

        def indent_token():
            """Generate an indentation space token."""
            return Token.Whitespace, ' ' * 4

        tokens = []
        new_line = True

        # Because of logging prefixes, skip the first line to avoid
        # misalignment.
        tokens.append(new_line_token())

        for ttype, value in pygments.lex(diff, DiffLexer()):
            for subval in value.split('\n'):
                if new_line:
                    tokens.append(indent_token())

                new_line = not subval

                if subval:
                    tokens.append((ttype, subval))
                else:
                    tokens.append(new_line_token())

        return tokens

    @staticmethod
    def format_testcase_result(testcase):
        """Format the result of a single testcase.

        TESTCASE is expected to be a dictionary with the following attributes
        set:

            * name
            * status
            * message

        RETURNS
            a list of pygments' Tokens
        """

        pad = max(0, 5 - len(testcase['status']))
        tokens = []

        if pad:
            tokens.append((Token.Whitespace, ' ' * pad))

        tokens.extend([
            (Testsuite.status_token(testcase['status']), testcase['status']),
            (Token.Whitespace, '  '),
            (Token.Text, testcase['name'])
        ])

        if testcase['message']:
            tokens.extend([
                (Token.Punctuation, ':'),
                (Token.Whitespace, ' '),
                (Token.Comment, testcase['message'])
            ])

        if testcase['duration']:
            tokens.extend([
                (Token.Whitespace, ' '),
                (Token.Punctuation, '('),
                (Token.Comment, '%.2fs' % testcase['duration']),
                (Token.Punctuation, ')')
            ])

        return tokens

    def format_testsuite_summary(self):
        """Format the testsuite's execution summary.

        RETURNS
            a list of pygments' Tokens
        """

        return [
            (Token.Whitespace, '\n'),

            (Token.Number, str(sum(self.summary.values()))),
            (Token.Whitespace, ' '),
            (Token.Text, 'testcases executed'),
            (Token.Whitespace, ' '),
            (Token.Punctuation, '('),
            (Token.Comment, 'duration: %.2fs' % self.duration),
            (Token.Punctuation, ')'),
            (Token.Whitespace, '\n'),

            (Token.TestResult.OK,
             str(self.summary['OK'] + self.summary['UOK'])),
            (Token.Whitespace, ' '),
            (Token.Text, 'completed'),
            (Token.Punctuation, ','),
            (Token.Whitespace, ' '),

            (Token.TestResult.DIFF,
             str(self.summary['DIFF'] + self.summary['CRASH'])),
            (Token.Whitespace, ' '),
            (Token.Text, 'failed'),
            (Token.Punctuation, ','),
            (Token.Whitespace, ' '),

            (Token.TestResult.XFAIL, str(self.summary['XFAIL'])),
            (Token.Whitespace, ' '),
            (Token.Text, 'expectedly failed')
        ]
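
Presumably this class is driven by a small launcher; a hedged sketch of such an entry point (the __main__ guard is not part of the original example):

if __name__ == '__main__':
    # execute() parses the command line, runs the MainLoop over the testcases
    # and prints the summary.
    Testsuite().execute()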
Example #11
def run_testsuite():
    """Main: parse command line and run the testsuite"""
    main = Main(formatter='%(message)s', add_targets_options=True)
    add_mainloop_options(main, extended_options=True)
    add_run_test_options(main)
    main.add_option("--with-Z999", dest="with_Z999",
                    action="store_true", default=False,
                    help="Add a test that always fail")
    main.add_option("--view-diffs", dest="view_diffs", action="store_true",
                    default=False, help="show diffs on stdout")
    main.add_option("--diffs", dest="view_diffs", action="store_true",
                    default=False, help="Alias for --view-diffs")
    main.add_option("--with-gprof", dest="with_gprof", action="store_true",
                    default=False, help="Generate profiling reports")
    main.add_option("--with-gdb", dest="with_gdb", action="store_true",
                    default=False, help="Run with gdb")
    main.add_option("--with-valgrind", dest="with_valgrind",
                    action="store_true", default=False,
                    help="Run with valgrind")
    main.add_option("--old-result-dir", type="string",
                    help="Old result dir")
    main.add_option("--from-build-dir", dest="from_build_dir",
                    action="store_true", default=False,
                    help="Run testsuite from local build (in repository)")
    main.add_option('--retry-when-errors-lower-than', dest='retry_threshold',
                    metavar="MAX_FAILED", default=0, type=int,
                    help="Retry the test that have failed if the number of "
                    "errors if lower than MAX_FAILED")
    main.parse_args()

    run = Runner(main.options)
    run.start(main.args, show_diffs=main.options.view_diffs)
Example #12
    def __cmdline_options(self):
        """Return an options object to represent the command line options"""
        main = Main(require_docstring=False, add_targets_options=True)
        main.add_option('--timeout', dest='timeout', type=int, default=None)
        main.add_option('--trace_dir',
                        dest='trace_dir',
                        metavar='DIR',
                        help='Traces location. No bootstrap if not specified.',
                        default=None)
        main.add_option('--report-file',
                        dest='report_file',
                        metavar='FILE',
                        help='The filename where to store the test report '
                        '[required]')
        main.add_option('--qualif-level',
                        dest='qualif_level',
                        metavar='QUALIF_LEVEL',
                        help='The target qualification level when we are '
                        'running in qualification mode.')

        main.add_option('--xcov-level',
                        dest='xcov_level',
                        help='Force the --level argument passed to xcov '
                        'instead of deducing it from the test category '
                        'when that normally happens.')

        main.add_option('--tags', dest='tags', default="")

        control.add_shared_options_to(main, toplevel=False)

        main.parse_args()
        if main.options.report_file is None:
            # This is a required "option" which is a bit self-contradictory,
            # but it's easy to do it that way.
            main.error("The report file must be specified with --report-file")

        # Get our tags set as a list. Fetch contents from file if needed
        # first:
        if main.options.tags and main.options.tags.startswith('@'):
            main.options.tags = ' '.join(lines_of(main.options.tags[1:]))
        if main.options.tags:
            main.options.tags = main.options.tags.split()

        return main.options
Example #13
class TestsuiteCore(object):
    """Testsuite Core driver.

    This class is the base of the Testsuite class and should not be
    instantiated. It is not recommended to override any of the functions
    declared in it.

    See documentation of Testsuite class for overridable methods and
    variables.
    """
    def __init__(self, root_dir):
        """Testsuite constructor.

        :param root_dir: root dir of the testsuite. Usually the directory in
            which testsuite.py and runtest.py are located
        :type root_dir: str | unicode
        """
        self.root_dir = os.path.abspath(root_dir)
        self.test_dir = os.path.join(self.root_dir, self.TEST_SUBDIR)
        self.global_env = {}
        self.test_env = {}
        self.global_env['root_dir'] = self.root_dir
        self.global_env['test_dir'] = self.test_dir
        self.consecutive_failures = 0

    def split_variant(self, name):
        """Split test scenario from the variant.

        :param name: the combination of test scenario and the variant
        :type name: str
        :return: a tuple with the test scenario file and the variant
        :rtype: (str, str) | (str, None)
        """
        if '|' in name:
            test_scenario, test_variant_str = name.split('|', 1)
            test_variant = yaml.load(test_variant_str)
            return (test_scenario, test_variant)
        else:
            return (name, None)

    def test_result_filename(self, test_case_file, variant):
        """Return the name of the file in which the result are stored.

        :param test_case_file: path to a test case scenario relative to the
            test directory
        :type test_case_file: str | unicode
        :param variant: the test variant
        :type variant: str
        :return: the test name. Note that test names should not contain path
            separators
        :rtype: str | unicode
        """
        return os.path.join(self.output_dir,
                            self.test_name(test_case_file, variant)) + '.yaml'

    def dump_test_result(self, result=None, status=None, msg=None):
        """Dump a result into the test result file in the output directory.

        :param result: the result object to be dumped. If None, a new Result
            object is created on the fly
        :type result: Result | None
        :param status: override the status of the result object
        :type status: str
        :param msg: override the short message associated with the result
        :type msg: str | unicode
        """
        if result is None:
            result = Result(self.test_env)

        if status is not None:
            result.set_status(status, msg)

        with open(
                self.test_result_filename(self.test_case_file,
                                          self.test_variant), 'wb') as fd:
            yaml.dump(result, fd)

    def test_main(self):
        """Main function for the script in charge of running a single test.

        The script expects two parameters on the command line:

        * the output dir in which the results of the tests are saved
        * the path to the test.yaml file relative to the tests directory
        """
        self.output_dir = sys.argv[1]
        self.test_case_file, self.test_variant = \
            self.split_variant(sys.argv[2])

        logging.getLogger('').setLevel(RAW)
        add_handlers(
            level=RAW,
            format='%(asctime)s: %(name)-24s: '
            '%(levelname)-8s %(message)s',
            filename=os.path.join(
                self.output_dir,
                self.test_name(self.test_case_file, self.test_variant) +
                '.log'))

        with open(os.path.join(self.output_dir, 'global_env.yaml'),
                  'rb') as fd:
            self.global_env = yaml.load(fd.read())

        # Set target information
        Env().build = self.global_env['build']
        Env().host = self.global_env['host']
        Env().target = self.global_env['target']

        # Load testcase file
        self.test_env = load_with_config(
            os.path.join(self.test_dir, self.test_case_file),
            Env().to_dict())

        # Ensure that the test_env acts like a dictionary
        if not isinstance(self.test_env, collections.Mapping):
            self.test_env = {
                'test_name': self.test_name(self.test_case_file,
                                            self.test_variant),
                'test_yaml_wrong_content': self.test_env
            }
            logger.error("abort test because of invalid test.yaml")
            self.dump_test_result(status="PROBLEM", msg="invalid test.yaml")
            return

        # Add to the test environment the directory in which the test.yaml is
        # stored
        self.test_env['test_dir'] = os.path.join(
            self.global_env['test_dir'], os.path.dirname(self.test_case_file))
        self.test_env['test_case_file'] = self.test_case_file
        self.test_env['test_variant'] = self.test_variant
        self.test_env['test_name'] = self.test_name(self.test_case_file,
                                                    self.test_variant)

        if 'driver' in self.test_env:
            driver = self.test_env['driver']
        else:
            driver = self.default_driver

        logger.debug('set driver to %s' % driver)
        if driver not in self.DRIVERS or \
                not issubclass(self.DRIVERS[driver], TestDriver):
            self.dump_test_result(status="PROBLEM", msg="cannot set driver")
            return

        try:
            instance = self.DRIVERS[driver](self.global_env, self.test_env)
        except Exception as e:
            error_msg = str(e)
            error_msg += "Traceback:\n"
            error_msg += "\n".join(traceback.format_tb(sys.exc_traceback))
            logger.error(error_msg)
            self.dump_test_result(status="PROBLEM",
                                  msg="exception during driver loading: %s" %
                                  str(e).split('\n')[0])
            return

        try:
            instance.tear_up()
            if instance.result.status == 'UNKNOWN':
                instance.run()
            if instance.result.status == 'UNKNOWN':
                instance.analyze()
        except Exception as e:
            error_msg = str(e)
            error_msg += "Traceback:\n"
            error_msg += "\n".join(traceback.format_tb(sys.exc_traceback))
            logger.error(error_msg)
            instance.result.set_status("PROBLEM",
                                       "exception: %s" % str(e).split('\n')[0])

        instance.tear_down()

        self.dump_test_result(instance.result)

    def dump_testsuite_result(self):
        """Dump testsuite result files.

        Dump the content of all <test>.yaml files and create report,
        result and comment files.
        """
        testsuite_results = os.path.join(self.output_dir, 'results')
        testsuite_report = os.path.join(self.output_dir, 'report')
        testsuite_comment = os.path.join(self.output_dir, 'comment')

        with open(testsuite_comment, 'w') as f:
            self.write_comment_file(f)

        touch(testsuite_results)

        # Mapping: test status -> hits. Computed to display the testsuite run
        # summary.
        summary = collections.defaultdict(lambda: 0)

        for test_result in find(self.output_dir, '*.yaml'):

            if os.path.basename(test_result) != 'global_env.yaml':
                with open(test_result, "rb") as fd:
                    tr_yaml = yaml.load(fd)

                if tr_yaml:
                    # result in results file
                    echo_to_file(testsuite_results,
                                 '%s:%s: %s\n' %
                                 (tr_yaml.test_env['test_name'],
                                  tr_yaml.status, tr_yaml.msg),
                                 append=True)

                    tr_yaml.dump_result(self.output_dir)
                    summary[tr_yaml.status] += 1

        try:
            report = ReportDiff(self.output_dir,
                                self.old_output_dir,
                                use_diff=True)
        except Exception:
            report = ReportDiff(self.output_dir, self.old_output_dir)

        report.txt_image(testsuite_report)

        summary_msg = ['Summary:']
        for status in sorted(summary):
            hits = summary[status]
            summary_msg.append('  {}: {} test{}'.format(
                status, hits, 's' if hits > 1 else ''))
        logging.info('\n'.join(summary_msg))

    def testsuite_main(self):
        """Main for the main testsuite script."""
        self.main = Main(add_targets_options=self.CROSS_SUPPORT)

        # Add common options
        add_mainloop_options(self.main)
        self.main.add_option("-o",
                             "--output-dir",
                             metavar="DIR",
                             default="./out",
                             help="select output dir")
        self.main.add_option("-t",
                             "--temp-dir",
                             metavar="DIR",
                             default=Env().tmp_dir)
        self.main.add_option(
            "--max-consecutive-failures",
            default=0,
            help="If there are more than N consecutive failures, the testsuite"
            " is aborted. If set to 0 (default) then the testsuite will never"
            " be stopped")
        self.main.add_option(
            "--keep-old-output-dir",
            default=False,
            action="store_true",
            help="This is default with this testsuite framework. The option"
            " is kept only to keep backward compatibility of invocation with"
            " former framework (gnatpython.testdriver)")
        self.main.add_option("--disable-cleanup",
                             dest="enable_cleanup",
                             action="store_false",
                             default=True,
                             help="disable cleanup of working space")
        self.main.add_option(
            "--show-error-output",
            action="store_true",
            help="When testcases fail, display their output. This is for"
            " convenience for interactive use.")
        self.main.add_option(
            "--dump-environ",
            dest="dump_environ",
            action="store_true",
            default=False,
            help="Dump all environment variables in a file named environ.sh,"
            " located in the output directory (see --output-dir). This"
            " file can then be sourced from a Bourne shell to recreate"
            " the environement that existed when this testsuite was run"
            " to produce a given testsuite report.")

        # Add user defined options
        self.add_options()

        # parse options
        self.main.parse_args()

        # At this stage compute commonly used paths
        # Keep the working dir as short as possible, to avoid the risk
        # of having a path that's too long (a problem often seen on
        # Windows, or when using WRS tools that have their own max path
        # limitations).
        # Note that we do make sure that working_dir is an absolute
        # path, as we are likely to be changing directories when
        # running each test. A relative path would no longer work
        # under those circumstances.
        d = os.path.abspath(self.main.options.output_dir)
        self.output_dir = os.path.join(d, 'new')
        self.old_output_dir = os.path.join(d, 'old')

        if not os.path.isdir(self.main.options.temp_dir):
            logging.critical("temp dir '%s' does not exist",
                             self.main.options.temp_dir)
            sys.exit(1)

        self.working_dir = tempfile.mkdtemp(
            '', 'tmp', os.path.abspath(self.main.options.temp_dir))

        # Create the new output directory that will hold the results
        self.setup_result_dir()

        # Store in global env: target information and common paths
        self.global_env['build'] = Env().build
        self.global_env['host'] = Env().host
        self.global_env['target'] = Env().target
        self.global_env['output_dir'] = self.output_dir
        self.global_env['working_dir'] = self.working_dir
        self.global_env['options'] = self.main.options

        # User specific startup
        self.tear_up()

        # Retrieve the list of tests
        self.test_list = self.get_test_list(self.main.args)

        # Dump global_env so that it can be used by test runners
        with open(os.path.join(self.output_dir, 'global_env.yaml'),
                  'wb') as fd:
            fd.write(yaml.dump(self.global_env))

        # Launch the mainloop
        self.total_test = len(self.test_list)
        self.run_test = 0

        MainLoop(self.test_list, self.launch_test, self.collect_result)

        self.dump_testsuite_result()

        # Clean everything
        self.tear_down()

    def launch_test(self, name, job_info):
        """Launch a test (mainloop callback)

        :param name: path to a test case file relative to the test directory
        :type name: str | unicode
        :param job_info: additional information associated with the worker
        :type job_info: (int, int)
        :return: a Run object
        :rtype: gnatpython.ex.Run
        """
        os.environ['WORKER_ID'] = str(job_info[0])
        return Run([
            sys.executable,
            os.path.join(self.root_dir, self.TEST_RUNNER), self.output_dir,
            name
        ],
                   bg=True,
                   output=None)

    def collect_result(self, name, process, _job_info):
        """Collect test results.

        See gnatpython.mainloop documentation
        """
        del process, _job_info
        test_name, test_variant = self.split_variant(name)
        result_file = self.test_result_filename(test_name, test_variant)
        if not os.path.isfile(result_file):
            result = Result()
            result.set_status("CRASH", "cannot find result file")
            with open(result_file, "wb") as fd:
                yaml.dump(result, fd)
        else:
            with open(result_file, "rb") as fd:
                result = yaml.load(fd)

        self.run_test += 1
        msg = "(%s/%s): %-32s: %s %s" % \
            (self.run_test, self.total_test,
             self.test_name(test_name, test_variant),
             result.status, result.msg)

        if Result.STATUS[result.status]:
            logger.error(msg)
            self.consecutive_failures += 1
            if self.main.options.show_error_output:
                logger.error('Testcase output was:\n' + result.actual_output)
        else:
            logger.info(msg)
            self.consecutive_failures = 0

        if 0 < self.main.options.max_consecutive_failures < \
                self.consecutive_failures:
            raise TooManyErrors

    def setup_result_dir(self):
        """Create the output directory in which the results are stored."""
        if os.path.isdir(self.old_output_dir):
            rm(self.old_output_dir, True)
        if os.path.isdir(self.output_dir):
            mv(self.output_dir, self.old_output_dir)
        mkdir(self.output_dir)

        if self.main.options.dump_environ:
            with open(os.path.join(self.output_dir, 'environ.sh'), 'w') as f:
                for var_name in sorted(os.environ):
                    f.write('export %s=%s\n' %
                            (var_name, quote_arg(os.environ[var_name])))
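
TestsuiteCore is meant to be subclassed. A hedged sketch of what a concrete subclass might look like, based on some of the attributes and hooks the code above references; MyDriver, the 'tests' subdirectory and the runtest.py name are assumptions, and os and find are the same imports the example already relies on:

class MyTestsuite(TestsuiteCore):
    TEST_SUBDIR = 'tests'            # used by __init__ to locate test_dir
    TEST_RUNNER = 'runtest.py'       # script spawned by launch_test
    CROSS_SUPPORT = False            # passed to Main(add_targets_options=...)
    DRIVERS = {'default': MyDriver}  # MyDriver: a TestDriver subclass, assumed
    default_driver = 'default'

    def add_options(self):
        # Register project-specific options on self.main here.
        pass

    def get_test_list(self, args):
        # Return test.yaml paths relative to the test directory.
        return args or [os.path.relpath(p, self.test_dir)
                        for p in find(self.test_dir, 'test.yaml')]

    def tear_up(self):
        pass

    def tear_down(self):
        pass

    def write_comment_file(self, f):
        f.write('my testsuite run\n')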
Example #14
def main():
    """Run the testsuite and generate reports"""
    # Parse the command line options
    m = Main(add_targets_options=True)
    add_mainloop_options(m)
    add_run_test_options(m)
    m.add_option('--diffs',
                 dest='diffs',
                 action='store_true',
                 default=False,
                 help='show diffs on stdout')
    m.add_option("--old-result-dir",
                 type="string",
                 default=None,
                 help="Old result dir (to generate the report)")
    m.add_option('-b',
                 '--build-dir',
                 dest='build_dir',
                 help='separate PolyORB build directory')
    m.add_option('--testsuite-src-dir',
                 dest='testsuite_src_dir',
                 help='path to polyorb testsuite sources')
    m.add_option('--coverage',
                 dest='coverage',
                 action='store_true',
                 default=False,
                 help='generate coverage information')
    m.parse_args()

    # Various files needed or created by the testsuite
    results_file = m.options.output_dir + '/results'
    report_file = m.options.output_dir + '/report'

    if not m.options.failed_only:
        rm(m.options.output_dir, True)
        mkdir(m.options.output_dir)

    # Add current directory in PYTHONPATH (to find test_utils.py)
    env = Env()
    env.add_search_path('PYTHONPATH', os.path.join(os.getcwd(), 'tests'))
    fixed_support_dir = os.path.join(os.getcwd(), 'fixed_support_dir')
    env.add_search_path('FIXED_SUPPORT_DIR', fixed_support_dir)
    env.add_path(os.path.join(fixed_support_dir))
    env.add_path('.')  # many tests expect '.' in the PATH

    # Avoid extra debug traces
    os.environ['POLYORB_LOG_DEFAULT'] = 'error'

    # Generate the discs list for test.opt parsing
    # Always add 'ALL'
    common_discs = Env().discriminants

    # Be backward compatible with the old IDL tests
    # Set the polyorb discriminant and export the IDLCOMP
    # environment variable.
    common_discs.append('PolyORB')
    common_discs.append('PolyORB_IAC')
    os.environ['IDLCOMP'] = 'iac'

    # Also retrieve the PolyORB-specific discriminants
    p = Run([
        which('bash'),
        which('polyorb-config').replace('\\', '/'), '--config'
    ])

    # First, find the supported application personalities.
    match = re.search('Application *personalities *: (.+)', p.out)
    if match is not None:
        common_discs += ['app_%s' % k for k in match.group(1).split()]

    # Then the supported protocols
    match = re.search('Protocol *personalities *: (.+)', p.out)
    if match is not None:
        common_discs += ['proto_%s' % k for k in match.group(1).split()]

    # Then the supported services
    match = re.search('Services *: (.+)', p.out)
    if match is not None:
        common_discs += ['serv_%s' % k for k in match.group(1).split()]

    # Do we have SSL support?
    if re.search('SSL *support *: *yes', p.out):
        common_discs.append('ssl_support')

    with open(m.options.output_dir + '/discs', 'w') as f_disk:
        f_disk.write(", ".join(common_discs))

    # Expand ~ and ~user constructions in the user-provided paths
    if m.options.build_dir is None:
        m.options.build_dir = os.path.join(os.getcwd(), os.pardir)
    else:
        m.options.build_dir = os.path.expanduser(m.options.build_dir)

    if m.options.testsuite_src_dir is None:
        m.options.testsuite_src_dir = os.path.join(os.getcwd())
    else:
        m.options.testsuite_src_dir = os.path.expanduser(
            m.options.testsuite_src_dir)

    # Compute the test list
    if m.args:
        test_glob = m.args[0]
    else:
        test_glob = None
    test_list = filter_list('./tests/*/*/*/test.py', test_glob)
    if os.path.isdir('regtests'):
        test_list.extend(filter_list('./regtests/*/test.*', test_glob))

    collect_result = generate_collect_result(m.options.output_dir,
                                             results_file, m.options.diffs)
    run_testcase = generate_run_testcase('tests/run-test.py', common_discs,
                                         m.options)

    os.environ['TEST_CONFIG'] = os.path.join(os.getcwd(), 'env.dump')
    env.options = m.options
    env.log_dir = os.path.join(os.getcwd(), 'log')
    env.store(os.environ['TEST_CONFIG'])

    if not test_list:
        logger.error("No matching test found")
        return

    MainLoop(test_list, run_testcase, collect_result, m.options.mainloop_jobs)

    # Generate the report file
    ReportDiff(m.options.output_dir,
               m.options.old_result_dir).txt_image(report_file)