Example #1
def __parse_options():
    """Parse command lines options"""
    m = Main(add_targets_options=True)
    add_mainloop_options(m)
    add_run_test_options(m)
    m.add_option("--diffs",
                 dest="view_diffs",
                 action="store_true",
                 default=False,
                 help="Print .diff content")
    m.add_option("--old-result-dir",
                 type="string",
                 default=None,
                 help="Old result dir")
    m.parse_args()

    if m.args:
        m.options.run_test = m.args[0]
        # The user wants to run only one test
        print "Running only test '%s'" % m.options.run_test
    else:
        m.options.run_test = ""

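    # --discs comes in as a single comma-separated string; turn it into a
    # list of discriminants for the test drivers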
    if m.options.discs:
        m.options.discs = m.options.discs.split(',')

    return m.options
Example #2
def run_testsuite():
    """Main: parse command line and run the testsuite"""
    main = Main(formatter='%(message)s', add_targets_options=True)
    add_mainloop_options(main, extended_options=True)
    add_run_test_options(main)
    main.add_option("--with-Z999", dest="with_Z999",
                    action="store_true", default=False,
                    help="Add a test that always fail")
    main.add_option("--view-diffs", dest="view_diffs", action="store_true",
                    default=False, help="show diffs on stdout")
    main.add_option("--diffs", dest="view_diffs", action="store_true",
                    default=False, help="Alias for --view-diffs")
    main.add_option("--with-gprof", dest="with_gprof", action="store_true",
                    default=False, help="Generate profiling reports")
    main.add_option("--with-gdb", dest="with_gdb", action="store_true",
                    default=False, help="Run with gdb")
    main.add_option("--with-valgrind", dest="with_valgrind",
                    action="store_true", default=False,
                    help="Run with valgrind")
    main.add_option("--old-result-dir", type="string",
                    help="Old result dir")
    main.add_option("--from-build-dir", dest="from_build_dir",
                    action="store_true", default=False,
                    help="Run testsuite from local build (in repository)")
    main.add_option('--retry-when-errors-lower-than', dest='retry_threshold',
                    metavar="MAX_FAILED", default=0, type=int,
                    help="Retry the test that have failed if the number of "
                    "errors if lower than MAX_FAILED")
    main.parse_args()

    run = Runner(main.options)
    run.start(main.args, show_diffs=main.options.view_diffs)
Example #3
def main():
    """Run the testsuite"""

    m = Main()
    add_mainloop_options(m, extended_options=True)
    add_run_test_options(m)
    m.add_option("--diffs",
                 dest="view_diffs",
                 action="store_true",
                 default=False,
                 help="show diffs on stdout")
    m.parse_args()

    # Various files needed or created by the testsuite.
    # setup_result_dir creates:
    #   the output directory ('out' by default)
    #   the report file
    #   the results file

    setup_result_dir(m.options)

    if m.args:
        test_list = [t.strip('/') for t in m.args]
    else:
        test_list = sorted(glob('tests/*'))

    env = Env()

    # add support module path
    python_lib = os.path.join(os.getcwd(), 'lib', 'python')
    env.add_search_path("PYTHONPATH", python_lib)

    env.add_search_path('PYTHONPATH', os.getcwd())
    discs = [env.target.platform]

    if m.options.discs:
        discs += m.options.discs.split(',')

    collect_result = generate_collect_result(m.options.output_dir,
                                             m.options.results_file,
                                             m.options.view_diffs)

    run_testcase = generate_run_testcase(python_lib + '/run-test', discs,
                                         m.options)

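    # Run every testcase through the mainloop, up to mainloop_jobs of them
    # in parallel, collecting each result as it completes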
    MainLoop(test_list, run_testcase, collect_result, m.options.mainloop_jobs)
    # Generate the report file
    ReportDiff(m.options.output_dir,
               m.options.old_output_dir).txt_image(m.options.report_file)
Example #4
def main():
    """Run the testsuite.
    """
    m = Main()
    add_mainloop_options(m, extended_options=True)
    add_run_test_options(m)
    m.add_option("--diffs",
                 dest="view_diffs",
                 action="store_true",
                 default=False,
                 help="show diffs on stdout")
    m.parse_args()

    # Create a tmp directory for the entire testsuite, to make sure
    # that, should the git hooks leak any file/directories, we can
    # (1) detect them, and (2) delete them.
    #
    # This requires some extra work to make sure that the scripts
    # being tested do actually use them, but this needs to be done
    # by each testcase, because we want each testcase to have its
    # own tmp directory (allowing for concurrency).  We pass that
    # information to the testcase through the GIT_HOOKS_TESTSUITE_TMP
    # environment variable.
    m.options.tmp = mkdtemp('', 'git-hooks-TS-', m.options.tmp)
    os.environ['GIT_HOOKS_TESTSUITE_TMP'] = m.options.tmp

    try:
        testcases = get_testcases(m.args)
        setup_result_dir(m.options)

        # We do not need discriminants in this testsuite at the moment.
        discs = None

        metrics = {}
        collect_result = generate_collect_result(metrics=metrics,
                                                 options=m.options)
        run_testcase = generate_run_testcase('bin/run-testcase', discs,
                                             m.options)

        MainLoop(testcases, run_testcase, collect_result,
                 m.options.mainloop_jobs)
        print_testsuite_results_summary(metrics)
    finally:
        rm(m.options.tmp, recursive=True)
Example #5
    def __init__(self):
        self.duration = 0
        self.summary = defaultdict(lambda: 0)

        self.formatter = None
        self.testcases = None

        self.testcase_runner = None

        self.env = Env()
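        # The target platform is always part of the discriminants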
        self.discs = [self.env.target.platform]

        self.main = Main()
        add_mainloop_options(self.main, extended_options=True)
        add_run_test_options(self.main)

        self.main.add_option('--with-diff', action='store_true', default=False,
                             help='show diffs on stdout')

        self.main.add_option('--colorize', action='store_true',
                             default=False, help=argparse.SUPPRESS)
Example #6
def __parse_options():
    """Parse command lines options"""
    m = Main(add_targets_options=True)
    add_mainloop_options(m)
    add_run_test_options(m)
    m.add_option("--diffs", dest="view_diffs", action="store_true",
                 default=False, help="Print .diff content")
    m.add_option("--old-result-dir", type="string", default=None,
                 help="Old result dir")
    m.parse_args()

    if m.args:
        m.options.run_test = m.args[0]
        # The user wants to run only one test
        print "Running only test '%s'" % m.options.run_test
    else:
        m.options.run_test = ""

    if m.options.discs:
        m.options.discs = m.options.discs.split(',')

    return m.options
Example #7
def __parse_options():
    """Parse command lines options"""
    m = Main(add_targets_options=False)
    add_mainloop_options(m, extended_options=True)
    add_run_test_options(m)
    m.add_option("--diffs",
                 dest="view_diffs",
                 action="store_true",
                 default=False,
                 help="show diffs on stdout")
    m.parse_args()

    if m.args:
        m.options.run_test = m.args[0]
        print "Running only test '%s'" % m.options.run_test
    else:
        m.options.run_test = ""

    if m.options.discs:
        m.options.discs = m.options.discs.split(',')

    return m.options
Example #8
    def testsuite_main(self):
        """Main for the main testsuite script."""
        self.main = Main(add_targets_options=self.CROSS_SUPPORT)

        # Add common options
        add_mainloop_options(self.main)
        self.main.add_option("-o",
                             "--output-dir",
                             metavar="DIR",
                             default="./out",
                             help="select output dir")
        self.main.add_option("-t",
                             "--temp-dir",
                             metavar="DIR",
                             default=Env().tmp_dir)
        self.main.add_option(
            "--max-consecutive-failures",
            default=0,
            help="If there are more than N consecutive failures, the testsuite"
            " is aborted. If set to 0 (default) then the testsuite will never"
            " be stopped")
        self.main.add_option(
            "--keep-old-output-dir",
            default=False,
            action="store_true",
            help="This is default with this testsuite framework. The option"
            " is kept only to keep backward compatibility of invocation with"
            " former framework (gnatpython.testdriver)")
        self.main.add_option("--disable-cleanup",
                             dest="enable_cleanup",
                             action="store_false",
                             default=True,
                             help="disable cleanup of working space")
        self.main.add_option(
            "--show-error-output",
            action="store_true",
            help="When testcases fail, display their output. This is for"
            " convenience for interactive use.")
        self.main.add_option(
            "--dump-environ",
            dest="dump_environ",
            action="store_true",
            default=False,
            help="Dump all environment variables in a file named environ.sh,"
            " located in the output directory (see --output-dir). This"
            " file can then be sourced from a Bourne shell to recreate"
            " the environement that existed when this testsuite was run"
            " to produce a given testsuite report.")

        # Add user defined options
        self.add_options()

        # parse options
        self.main.parse_args()

        # At this stage compute commonly used paths
        # Keep the working dir as short as possible, to avoid the risk
        # of having a path that's too long (a problem often seen on
        # Windows, or when using WRS tools that have their own max path
        # limitations).
        # Note that we do make sure that working_dir is an absolute
        # path, as we are likely to be changing directories when
        # running each test. A relative path would no longer work
        # under those circumstances.
        d = os.path.abspath(self.main.options.output_dir)
        self.output_dir = os.path.join(d, 'new')
        self.old_output_dir = os.path.join(d, 'old')

        if not os.path.isdir(self.main.options.temp_dir):
            logging.critical("temp dir '%s' does not exist",
                             self.main.options.temp_dir)
            sys.exit(1)

        self.working_dir = tempfile.mkdtemp(
            '', 'tmp', os.path.abspath(self.main.options.temp_dir))

        # Create the new output directory that will hold the results
        self.setup_result_dir()

        # Store in global env: target information and common paths
        self.global_env['build'] = Env().build
        self.global_env['host'] = Env().host
        self.global_env['target'] = Env().target
        self.global_env['output_dir'] = self.output_dir
        self.global_env['working_dir'] = self.working_dir
        self.global_env['options'] = self.main.options

        # User specific startup
        self.tear_up()

        # Retrieve the list of tests
        self.test_list = self.get_test_list(self.main.args)

        # Dump global_env so that it can be used by test runners
        with open(os.path.join(self.output_dir, 'global_env.yaml'),
                  'wb') as fd:
            fd.write(yaml.dump(self.global_env))

        # Launch the mainloop
        self.total_test = len(self.test_list)
        self.run_test = 0

        MainLoop(self.test_list, self.launch_test, self.collect_result)

        self.dump_testsuite_result()

        # Clean everything
        self.tear_down()
Example #9
def main():
    """Run the testsuite"""

    m = Main()
    add_mainloop_options(m, extended_options=True)
    add_run_test_options(m)
    m.add_option("--diffs",
                 dest="view_diffs",
                 action="store_true",
                 default=False,
                 help="show diffs on stdout")
    m.parse_args()

    # Various files needed or created by the testsuite.
    # setup_result_dir creates:
    #   the output directory ('out' by default)
    #   the report file
    #   the results file

    setup_result_dir(m.options)

    if m.args:
        test_list = [t.strip('/') for t in m.args]
    else:
        test_list = sorted(glob('tests/*'))

    env = Env()

    # add support module path
    python_lib = os.path.join(os.getcwd(), 'lib', 'python')
    env.add_search_path("PYTHONPATH", python_lib)

    env.add_search_path('PYTHONPATH', os.getcwd())
    discs = [env.target.platform]

    if m.options.discs:
        discs += m.options.discs.split(',')

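    # Aggregate metrics: the generated collect_result is expected to fill in
    # the 'run', 'failed' and 'crashed' counters used by the summary below,
    # while the wrapper adds 'uok' and 'invalid'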
    test_metrics = {'total': len(test_list), 'uok': 0, 'invalid': 0}

    # Generate a standard 'collect_result' function...
    generated_collect_result = generate_collect_result(
        result_dir=m.options.output_dir,
        results_file=m.options.results_file,
        output_diff=m.options.view_diffs,
        metrics=test_metrics)

    # ... and then wrap that generated 'collect_result' function in one that
    # also counts 'UOK' and 'INVALID_TEST' results
    def collect_test_metrics(name, process, _job_info):
        generated_collect_result(name, process, _job_info)
        test_name = os.path.basename(name)
        test_result = split_file(m.options.output_dir + '/' + test_name +
                                 '.result',
                                 ignore_errors=True)
        if test_result:
            test_status = test_result[0].split(':')[0]
            if test_status == 'UOK':
                test_metrics['uok'] += 1
            elif test_status == 'INVALID_TEST':
                test_metrics['invalid'] += 1

    run_testcase = generate_run_testcase('run-test', discs, m.options)

    MainLoop(test_list, run_testcase, collect_test_metrics,
             m.options.mainloop_jobs)

    print "Summary: Ran %(run)s/%(total)s tests, with %(failed)s failed, %(crashed)s crashed, %(uok)s unexpectedly passed, %(invalid)s invalid." % test_metrics

    # Generate the report file
    ReportDiff(m.options.output_dir,
               m.options.old_output_dir).txt_image(m.options.report_file)

    if (test_metrics['failed'] > 0 or test_metrics['crashed'] > 0
            or test_metrics['uok'] > 0 or test_metrics['invalid'] > 0):
        sys.exit(1)
Example #10
def __parse_options():
    """Parse command lines options"""
    m = Main(add_targets_options=True)
    add_mainloop_options(m, extended_options=True)
    add_run_test_options(m)
    m.add_option("--benchmarks",
                 dest="benchmarks",
                 action="store_true",
                 default=False,
                 help="collect benchmarks")
    m.add_option("--debug",
                 dest="debug",
                 action="store_true",
                 default=False,
                 help="output debugging information")
    m.add_option("--diffs",
                 dest="view_diffs",
                 action="store_true",
                 default=False,
                 help="show diffs on stdout")
    m.add_option("--exact",
                 dest="exact_name",
                 action="store_true",
                 default=False,
                 help="provide exact name of test (not regexp)")
    m.add_option("--testlist",
                 dest="test_list",
                 action="store",
                 type="string",
                 help="provide text file with one test per line to be run")
    m.add_option("--pattern",
                 dest="pattern",
                 action="store",
                 type="string",
                 help="only run tests whose ada files contain this pattern")
    m.add_option("--inverse-prover",
                 dest="inverse_prover",
                 action="store_true",
                 default=False,
                 help="inverse order of default provers")
    m.add_option("--vc-timeout",
                 dest="vc_timeout",
                 action="store",
                 type="int",
                 help="set timeout for prover")
    m.add_option("--cache",
                 dest="cache",
                 action="store_true",
                 default=False,
                 help="use memcached to speed up testsuite")
    m.parse_args()

    if m.args:
        m.options.run_test = m.args[0]
        print "Running only test '%s'" % m.options.run_test
    else:
        m.options.run_test = ""

    if m.options.discs:
        m.options.discs = m.options.discs.split(',')

    return m.options
Example #11
def main():
    """Run the testsuite and generate reports"""
    # Parse the command line options
    m = Main(add_targets_options=True)
    add_mainloop_options(m)
    add_run_test_options(m)
    m.add_option('--diffs',
                 dest='diffs',
                 action='store_true',
                 default=False,
                 help='show diffs on stdout')
    m.add_option("--old-result-dir",
                 type="string",
                 default=None,
                 help="Old result dir (to generate the report)")
    m.add_option('-b',
                 '--build-dir',
                 dest='build_dir',
                 help='separate PolyORB build directory')
    m.add_option('--testsuite-src-dir',
                 dest='testsuite_src_dir',
                 help='path to polyorb testsuite sources')
    m.add_option('--coverage',
                 dest='coverage',
                 action='store_true',
                 default=False,
                 help='generate coverage information')
    m.parse_args()

    # Various files needed or created by the testsuite
    results_file = m.options.output_dir + '/results'
    report_file = m.options.output_dir + '/report'

    if not m.options.failed_only:
        rm(m.options.output_dir, True)
        mkdir(m.options.output_dir)

    # Add current directory in PYTHONPATH (to find test_utils.py)
    env = Env()
    env.add_search_path('PYTHONPATH', os.path.join(os.getcwd(), 'tests'))
    fixed_support_dir = os.path.join(os.getcwd(), 'fixed_support_dir')
    env.add_search_path('FIXED_SUPPORT_DIR', fixed_support_dir)
    env.add_path(fixed_support_dir)
    env.add_path('.')  # many tests expect '.' in the PATH

    # Avoid extra debug traces
    os.environ['POLYORB_LOG_DEFAULT'] = 'error'

    # Generate the discs list for test.opt parsing
    # Always add 'ALL'
    common_discs = Env().discriminants

    # Be backward compatible with the old IDL tests
    # Set the polyorb discriminant and export the IDLCOMP
    # environment variable.
    common_discs.append('PolyORB')
    common_discs.append('PolyORB_IAC')
    os.environ['IDLCOMP'] = 'iac'

    # Retrieve also the polyorb specific discriminants
    p = Run([
        which('bash'),
        which('polyorb-config').replace('\\', '/'), '--config'
    ])

    # First, find the supported application personalities.
    match = re.search('Application *personalities *: (.+)', p.out)
    if match is not None:
        common_discs += ['app_%s' % k for k in match.group(1).split()]

    # Then the supported protocols
    match = re.search('Protocol *personalities *: (.+)', p.out)
    if match is not None:
        common_discs += ['proto_%s' % k for k in match.group(1).split()]

    # Then the supported services
    match = re.search('Services *: (.+)', p.out)
    if match is not None:
        common_discs += ['serv_%s' % k for k in match.group(1).split()]

    # Do we have SSL support?
    if re.search('SSL *support *: *yes', p.out):
        common_discs.append('ssl_support')

    with open(m.options.output_dir + '/discs', 'w') as f_disk:
        f_disk.write(", ".join(common_discs))

    # Expand ~ and ~user constructions in user-supplied paths
    if m.options.build_dir is None:
        m.options.build_dir = os.path.join(os.getcwd(), os.pardir)
    else:
        m.options.build_dir = os.path.expanduser(m.options.build_dir)

    if m.options.testsuite_src_dir is None:
        m.options.testsuite_src_dir = os.path.join(os.getcwd())
    else:
        m.options.testsuite_src_dir = os.path.expanduser(
            m.options.testsuite_src_dir)

    # Compute the test list
    if m.args:
        test_glob = m.args[0]
    else:
        test_glob = None
    test_list = filter_list('./tests/*/*/*/test.py', test_glob)
    if os.path.isdir('regtests'):
        test_list.extend(filter_list('./regtests/*/test.*', test_glob))

    collect_result = generate_collect_result(m.options.output_dir,
                                             results_file, m.options.diffs)
    run_testcase = generate_run_testcase('tests/run-test.py', common_discs,
                                         m.options)

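    # Save the testsuite environment to env.dump; its location is advertised
    # through TEST_CONFIG, presumably so that each test driver can restore
    # the same settings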
    os.environ['TEST_CONFIG'] = os.path.join(os.getcwd(), 'env.dump')
    env.options = m.options
    env.log_dir = os.path.join(os.getcwd(), 'log')
    env.store(os.environ['TEST_CONFIG'])

    if len(test_list) == 0:
        logger.error("No matching test found")
        return

    MainLoop(test_list, run_testcase, collect_result, m.options.mainloop_jobs)

    # Generate the report file
    ReportDiff(m.options.output_dir,
               m.options.old_result_dir).txt_image(report_file)