Example #1
    def execute(self):
        """Run the testsuite and execute testcases."""

        # Add the support directory to PYTHONPATH so that modules are
        # accessible from each test case.
        Env().add_search_path('PYTHONPATH', os.path.dirname(const.basedir))

        self.parse_command_line()

        self.testcase_runner = generate_run_testcase(
            os.path.join(BASEDIR, 'run-test'),
            self.discs, self.main.options)

        MainLoop(self.testcases,
                 self.testcase_runner,
                 self.collect_result,
                 self.main.options.mainloop_jobs)

        # Generate the report file
        diff = ReportDiff(
            self.main.options.output_dir,
            self.main.options.old_output_dir
        )
        diff.txt_image(self.main.options.report_file)
        self.log(self.format_testsuite_summary())
Example #2
def run_testsuite(test_driver):
    """Run the testsuite

    PARAMETERS
      test_driver: path to the test driver (e.g. lib/python/run-test)
    """
    options = __parse_options()
    env = Env()

    test_list = [
        t for t in filter_list('tests/*', options.run_test) if os.path.isdir(t)
    ]

    # Various files needed or created by the testsuite
    setup_result_dir(options)

    discs = env.discriminants

    if options.discs:
        discs += options.discs

    run_testcase = generate_run_testcase(test_driver, discs, options)
    collect_result = generate_collect_result(options.output_dir,
                                             options.results_file,
                                             options.view_diffs)

    MainLoop(test_list, run_testcase, collect_result, options.mainloop_jobs)

    # Write report
    with open(options.output_dir + '/discs', 'w') as discs_f:
        discs_f.write(" ".join(discs))
    ReportDiff(options.output_dir,
               options.old_output_dir).txt_image(options.report_file)
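Every example in this collection hands the same two callbacks to MainLoop: one that starts a testcase as a background process and one that collects its result once the process finishes. The minimal sketch below is inferred from the callback signatures visible in Example #4 and Example #7; the gnatpython import paths and the process.status attribute are assumptions, not something any of the examples spells out.

# Minimal sketch of the MainLoop callback contract (assumed interface).
import sys

from gnatpython.ex import Run             # assumed import path
from gnatpython.mainloop import MainLoop  # assumed import path


def run_testcase(test, _job_info):
    """Start one testcase as a background process (cf. Example #4)."""
    return Run([sys.executable, 'run-test', test], bg=True)


def collect_result(name, process, _job_info):
    """Called once the process started for `name` has finished."""
    print('%s exited with status %s' % (name, process.status))


# Run at most 4 testcases in parallel.
MainLoop(['tests/t1', 'tests/t2'], run_testcase, collect_result, 4)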
Example #3
    def start(self, tests, show_diffs=False, old_result_dir=None):
        """Start the testsuite"""
        # Generate the testcases list
        if tests:
            # tests parameter can be a file containing a list of tests
            if len(tests) == 1 and os.path.isfile(tests[0]):
                with open(tests[0]) as _list:
                    tests = [t.strip().split(':')[0] for t in _list]
            else:
                # user-provided list of tests; strip trailing / so that shell
                # file completion can be used
                tests = [t.rstrip('/') for t in tests]
        else:
            # Get all */test.py tests
            tests = [os.path.dirname(t) for t in sorted(glob('*/test.py'))]

        if not Env().testsuite_config.with_Z999:
            # Do not run Z999 test
            tests = [t for t in tests if t != 'Z999_xfail']

        test_metrics = {'total': len(tests)}

        # Run the main loop
        collect_result = generate_collect_result(
            options=self.options,
            output_diff=show_diffs,
            metrics=test_metrics)
        run_testcase = generate_run_testcase('run-test', self.discs,
                                             Env().testsuite_config)
        MainLoop(tests, run_testcase, collect_result,
                 Env().testsuite_config.mainloop_jobs)

        if self.options.retry_threshold > 0:
            # Set skip_if_ok/skip_if_dead and re-run the testsuite with
            # mainloop_jobs set to 1, to avoid parallelism problems on the
            # tests that previously failed.
            if test_metrics['failed'] < self.options.retry_threshold:
                logging.warning("%d tests have failed (threshold was %d)."
                                " Retrying..."
                                % (test_metrics['failed'],
                                   self.options.retry_threshold))

                # Regenerate collect_result function
                self.options.skip_if_ok = True
                self.options.skip_if_dead = True
                collect_result = generate_collect_result(
                    options=self.options,
                    output_diff=show_diffs,
                    metrics=test_metrics)
                MainLoop(tests, run_testcase, collect_result, 1)
            else:
                logging.error("Too many errors")

        # Write report
        ReportDiff(self.options.output_dir,
                   self.options.old_output_dir).txt_image(
                       self.options.report_file)
Example #4
def main():
    """Run the testsuite"""
    options = __parse_options()
    assert os.path.exists(makedir("bin")), \
        "cannot find %s directory" % makedir("bin")
    assert os.path.exists(makedir("rbin")), \
        "cannot find %s directory" % makedir("rbin")
    env = Env()
    env.add_search_path("PYTHONPATH", os.getcwd())

    test_list = [
        t for t in filter_list('tests/*', options.run_test) if os.path.isdir(t)
    ]

    # Various files needed or created by the testsuite
    result_dir = options.output_dir
    results_file = result_dir + '/results'

    if os.path.exists(result_dir):
        rm(result_dir, True)

    mkdir(result_dir)

    discs = env.discriminants

    if options.discs:
        discs += options.discs

    def test_build_cmd(test, _):
        """Run the given test"""
        cmd = [
            sys.executable, 'run-test', '-d', ",".join(discs), '-o',
            result_dir, '-t', options.tmp, test
        ]
        if options.verbose:
            cmd.append('-v')
        if options.host:
            cmd.append('--host=' + options.host)
        if options.target:
            cmd.append('--target=' + options.target)
        if not options.enable_cleanup:
            cmd.append('--disable-cleanup')
        return Run(cmd, bg=True)

    collect_result = generate_collect_result(result_dir, results_file,
                                             options.view_diffs)

    MainLoop(test_list, test_build_cmd, collect_result, options.mainloop_jobs)

    # Write report
    with open(result_dir + '/discs', 'w') as discs_f:
        discs_f.write(" ".join(discs))
    ReportDiff(result_dir,
               options.old_result_dir).txt_image(result_dir + '/report')
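The inline test_build_cmd above is essentially what the generate_run_testcase factories used in the other examples return. Below is a hedged sketch of such a factory, closing over the driver path, the discriminants and the parsed options; the flag names mirror the command built in this example and are assumptions rather than a documented interface, and sys and Run are assumed to be imported as in the example.

def generate_run_testcase(test_driver, discs, options):
    """Return a MainLoop callback that runs `test_driver` on one test."""
    def run_testcase(test, _job_info):
        # Build the same kind of command line as test_build_cmd above.
        cmd = [sys.executable, test_driver,
               '-d', ','.join(discs),
               '-o', options.output_dir,
               test]
        if options.verbose:
            cmd.append('-v')
        return Run(cmd, bg=True)
    return run_testcase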
Example #5
def main():
    """Run the testsuite"""

    m = Main()
    add_mainloop_options(m, extended_options=True)
    add_run_test_options(m)
    m.add_option("--diffs",
                 dest="view_diffs",
                 action="store_true",
                 default=False,
                 help="show diffs on stdout")
    m.parse_args()

    # Various files needed or created by the testsuite
    # creates:
    #   the output directory (out by default)
    #   the report file
    #   the results file

    setup_result_dir(m.options)

    if m.args:
        test_list = [t.strip('/') for t in m.args]
    else:
        test_list = sorted(glob('tests/*'))

    env = Env()

    # add support module path
    python_lib = os.path.join(os.getcwd(), 'lib', 'python')
    Env().add_search_path("PYTHONPATH", python_lib)

    env.add_search_path('PYTHONPATH', os.getcwd())
    discs = [env.target.platform]

    if m.options.discs:
        discs += m.options.discs.split(',')

    collect_result = generate_collect_result(m.options.output_dir,
                                             m.options.results_file,
                                             m.options.view_diffs)

    run_testcase = generate_run_testcase(python_lib + '/run-test', discs,
                                         m.options)

    MainLoop(test_list, run_testcase, collect_result, m.options.mainloop_jobs)
    # Generate the report file
    ReportDiff(m.options.output_dir,
               m.options.old_output_dir).txt_image(m.options.report_file)
Example #6
    def dump_testsuite_result(self):
        """Dump testsuite result files.

        Dump the content of all <test>.yaml files and create report,
        result and comment files.
        """
        testsuite_results = os.path.join(self.output_dir, 'results')
        testsuite_report = os.path.join(self.output_dir, 'report')
        testsuite_comment = os.path.join(self.output_dir, 'comment')

        with open(testsuite_comment, 'w') as f:
            self.write_comment_file(f)

        touch(testsuite_results)

        # Mapping: test status -> hits. Computed to display the testsuite run
        # summary.
        summary = collections.defaultdict(lambda: 0)

        for test_result in find(self.output_dir, '*.yaml'):

            if os.path.basename(test_result) != 'global_env.yaml':
                with open(test_result, "rb") as fd:
                    tr_yaml = yaml.load(fd)

                if tr_yaml:
                    # result in results file
                    echo_to_file(testsuite_results,
                                 '%s:%s: %s\n' %
                                 (tr_yaml.test_env['test_name'],
                                  tr_yaml.status, tr_yaml.msg),
                                 append=True)

                    tr_yaml.dump_result(self.output_dir)
                    summary[tr_yaml.status] += 1

        try:
            report = ReportDiff(self.output_dir,
                                self.old_output_dir,
                                use_diff=True)
        except Exception:
            # Fall back to a plain report if the diff-based one cannot be built
            report = ReportDiff(self.output_dir, self.old_output_dir)

        report.txt_image(testsuite_report)

        summary_msg = ['Summary:']
        for status in sorted(summary):
            hits = summary[status]
            summary_msg.append('  {}: {} test{}'.format(
                status, hits, 's' if hits > 1 else ''))
        logging.info('\n'.join(summary_msg))
Example #7
def main():
    """Run the testsuite"""

    m = Main()
    add_mainloop_options(m, extended_options=True)
    add_run_test_options(m)
    m.add_option("--diffs",
                 dest="view_diffs",
                 action="store_true",
                 default=False,
                 help="show diffs on stdout")
    m.parse_args()

    # Various files needed or created by the testsuite
    # creates:
    #   the output directory (out by default)
    #   the report file
    #   the results file

    setup_result_dir(m.options)

    if m.args:
        test_list = [t.strip('/') for t in m.args]
    else:
        test_list = sorted(glob('tests/*'))

    env = Env()

    # add support module path
    python_lib = os.path.join(os.getcwd(), 'lib', 'python')
    Env().add_search_path("PYTHONPATH", python_lib)

    env.add_search_path('PYTHONPATH', os.getcwd())
    discs = [env.target.platform]

    if m.options.discs:
        discs += m.options.discs.split(',')

    test_metrics = {'total': len(test_list), 'uok': 0, 'invalid': 0}

    # Generate a standard 'collect_result' function...
    generated_collect_result = generate_collect_result(
        result_dir=m.options.output_dir,
        results_file=m.options.results_file,
        output_diff=m.options.view_diffs,
        metrics=test_metrics)

    # ... and then wrap that generated 'collect_result' function in something
    # that also counts 'UOK' and 'INVALID_TEST' results
    def collect_test_metrics(name, process, _job_info):
        generated_collect_result(name, process, _job_info)
        test_name = os.path.basename(name)
        test_result = split_file(m.options.output_dir + '/' + test_name +
                                 '.result',
                                 ignore_errors=True)
        if test_result:
            test_status = test_result[0].split(':')[0]
            if test_status == 'UOK':
                test_metrics['uok'] += 1
            elif test_status == 'INVALID_TEST':
                test_metrics['invalid'] += 1

    run_testcase = generate_run_testcase('run-test', discs, m.options)

    MainLoop(test_list, run_testcase, collect_test_metrics,
             m.options.mainloop_jobs)

    print "Summary: Ran %(run)s/%(total)s tests, with %(failed)s failed, %(crashed)s crashed, %(uok)s unexpectedly passed, %(invalid)s invalid." % test_metrics

    # Generate the report file
    ReportDiff(m.options.output_dir,
               m.options.old_output_dir).txt_image(m.options.report_file)

    if (test_metrics['failed'] > 0 or test_metrics['crashed'] > 0
            or test_metrics['uok'] > 0 or test_metrics['invalid'] > 0):
        sys.exit(1)
Example #8
def run_testsuite(test_driver):
    """Run the testsuite

    PARAMETERS
      test_driver: path to the test driver (e.g. lib/python/run-test)
    """
    options = __parse_options()
    env = Env()

    if options.vc_timeout:
        os.environ["vc_timeout"] = str(options.vc_timeout)
    if options.debug:
        os.environ["debug"] = "true"
    if options.verbose:
        os.environ["verbose"] = "true"
    if options.inverse_prover:
        os.environ["inverse_prover"] = "true"
    if options.benchmarks:
        os.environ["benchmarks"] = "true"
    if options.cache:
        os.environ["cache"] = "true"

    if options.test_list:
        with open(options.test_list, 'r') as f:
            test_list = f.readlines()
            test_list = [os.path.join("tests", s.strip()) for s in test_list]
            test_list = [t for t in test_list if os.path.isdir(t)]
    elif options.exact_name:
        test_name = os.path.join('tests/', options.run_test)
        if os.path.isdir(test_name):
            test_list = [test_name]
        else:
            print "error: test '" + options.run_test + "' not found"
            exit(1)
    elif options.pattern:
        test_list = filter_list('tests/*')
        reg = re.compile(options.pattern)
        test_list = [
            test for test in test_list if test_contains_pattern(test, reg)
        ]
    else:
        test_list = [
            t for t in filter_list('tests/*', options.run_test)
            if os.path.isdir(t)
        ]

    # Various files needed or created by the testsuite
    setup_result_dir(options)

    discs = env.discriminants

    if options.discs:
        discs += options.discs

    run_testcase = generate_run_testcase(test_driver, discs, options)
    collect_result = generate_collect_result(options.output_dir,
                                             options.results_file,
                                             options.view_diffs)

    MainLoop(test_list, run_testcase, collect_result, options.mainloop_jobs)

    # Write report
    with open(options.output_dir + '/discs', 'w') as discs_f:
        discs_f.write(" ".join(discs))
    ReportDiff(options.output_dir,
               options.old_output_dir).txt_image(options.report_file)
Example #9
def main():
    """Run the testsuite and generate reports"""
    # Parse the command-line options
    m = Main(add_targets_options=True)
    add_mainloop_options(m)
    add_run_test_options(m)
    m.add_option('--diffs',
                 dest='diffs',
                 action='store_true',
                 default=False,
                 help='show diffs on stdout')
    m.add_option("--old-result-dir",
                 type="string",
                 default=None,
                 help="Old result dir (to generate the report)")
    m.add_option('-b',
                 '--build-dir',
                 dest='build_dir',
                 help='separate PolyORB build directory')
    m.add_option('--testsuite-src-dir',
                 dest='testsuite_src_dir',
                 help='path to polyorb testsuite sources')
    m.add_option('--coverage',
                 dest='coverage',
                 action='store_true',
                 default=False,
                 help='generate coverage information')
    m.parse_args()

    # Various files needed or created by the testsuite
    results_file = m.options.output_dir + '/results'
    report_file = m.options.output_dir + '/report'

    if not m.options.failed_only:
        rm(m.options.output_dir, True)
        mkdir(m.options.output_dir)

    # Add the tests directory to PYTHONPATH (to find test_utils.py)
    env = Env()
    env.add_search_path('PYTHONPATH', os.path.join(os.getcwd(), 'tests'))
    fixed_support_dir = os.path.join(os.getcwd(), 'fixed_support_dir')
    env.add_search_path('FIXED_SUPPORT_DIR', fixed_support_dir)
    env.add_path(os.path.join(fixed_support_dir))
    env.add_path('.')  # many tests expect '.' in the PATH

    # Avoid extra debug traces
    os.environ['POLYORB_LOG_DEFAULT'] = 'error'

    # Generate the discs list for test.opt parsing
    # Always add 'ALL'
    common_discs = Env().discriminants

    # Be backward compatible with the old IDL tests
    # Set the polyorb discriminant and export the IDLCOMP
    # environment variable.
    common_discs.append('PolyORB')
    common_discs.append('PolyORB_IAC')
    os.environ['IDLCOMP'] = 'iac'

    # Also retrieve the PolyORB-specific discriminants
    p = Run([
        which('bash'),
        which('polyorb-config').replace('\\', '/'), '--config'
    ])

    # First, find the supported application personalities.
    match = re.search('Application *personalities *: (.+)', p.out)
    if match is not None:
        common_discs += ['app_%s' % k for k in match.group(1).split()]

    # Then the supported protocols
    match = re.search('Protocol *personalities *: (.+)', p.out)
    if match is not None:
        common_discs += ['proto_%s' % k for k in match.group(1).split()]

    # Then the supported services
    match = re.search('Services *: (.+)', p.out)
    if match is not None:
        common_discs += ['serv_%s' % k for k in match.group(1).split()]

    # Do we have SSL support?
    if re.search('SSL *support *: *yes', p.out):
        common_discs.append('ssl_support')

    with open(m.options.output_dir + '/discs', 'w') as f_disk:
        f_disk.write(", ".join(common_discs))

    # Expand ~ and ~user constructions in user-supplied paths
    if m.options.build_dir is None:
        m.options.build_dir = os.path.join(os.getcwd(), os.pardir)
    else:
        m.options.build_dir = os.path.expanduser(m.options.build_dir)

    if m.options.testsuite_src_dir is None:
        m.options.testsuite_src_dir = os.path.join(os.getcwd())
    else:
        m.options.testsuite_src_dir = os.path.expanduser(
            m.options.testsuite_src_dir)

    # Compute the test list
    if m.args:
        test_glob = m.args[0]
    else:
        test_glob = None
    test_list = filter_list('./tests/*/*/*/test.py', test_glob)
    if os.path.isdir('regtests'):
        test_list.extend(filter_list('./regtests/*/test.*', test_glob))

    collect_result = generate_collect_result(m.options.output_dir,
                                             results_file, m.options.diffs)
    run_testcase = generate_run_testcase('tests/run-test.py', common_discs,
                                         m.options)

    os.environ['TEST_CONFIG'] = os.path.join(os.getcwd(), 'env.dump')
    env.options = m.options
    env.log_dir = os.path.join(os.getcwd(), 'log')
    env.store(os.environ['TEST_CONFIG'])

    if len(test_list) == 0:
        logger.error("No matching test found")
        return

    MainLoop(test_list, run_testcase, collect_result, m.options.mainloop_jobs)

    # Generate the report file
    ReportDiff(m.options.output_dir,
               m.options.old_result_dir).txt_image(report_file)