Example no. 1
def run_testsuite(test_driver):
    """Run the testsuite

    PARAMETERS
      test_driver: path to the test driver (e.g. lib/python/run-test)
    """
    options = __parse_options()
    env = Env()

    test_list = [
        t for t in filter_list('tests/*', options.run_test) if os.path.isdir(t)
    ]

    # Various files needed or created by the testsuite
    setup_result_dir(options)

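    # Discriminants (platform tags and similar) are passed to the test
    # driver, which typically uses them to select test variants.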
    discs = env.discriminants

    if options.discs:
        discs += options.discs

    run_testcase = generate_run_testcase(test_driver, discs, options)
    collect_result = generate_collect_result(options.output_dir,
                                             options.results_file,
                                             options.view_diffs)

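    # Run each testcase through the test driver, up to mainloop_jobs at a
    # time, and hand every result to collect_result.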
    MainLoop(test_list, run_testcase, collect_result, options.mainloop_jobs)

    # Write report
    with open(options.output_dir + '/discs', 'w') as discs_f:
        discs_f.write(" ".join(discs))
    ReportDiff(options.output_dir,
               options.old_output_dir).txt_image(options.report_file)
Example no. 2
def main():
    """Run the testsuite"""

    m = Main()
    add_mainloop_options(m, extended_options=True)
    add_run_test_options(m)
    m.add_option("--diffs",
                 dest="view_diffs",
                 action="store_true",
                 default=False,
                 help="show diffs on stdout")
    m.parse_args()

    # Various files needed or created by the testsuite.
    # This creates:
    #   the output directory ("out" by default)
    #   the report file
    #   the results file

    setup_result_dir(m.options)

    if m.args:
        test_list = [t.strip('/') for t in m.args]
    else:
        test_list = sorted(glob('tests/*'))

    env = Env()

    # add support module path
    python_lib = os.path.join(os.getcwd(), 'lib', 'python')
    Env().add_search_path("PYTHONPATH", python_lib)

    env.add_search_path('PYTHONPATH', os.getcwd())
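    # Start the discriminants list with the target platform name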
    discs = [env.target.platform]

    if m.options.discs:
        discs += m.options.discs.split(',')

    collect_result = generate_collect_result(m.options.output_dir,
                                             m.options.results_file,
                                             m.options.view_diffs)

    run_testcase = generate_run_testcase(python_lib + '/run-test', discs,
                                         m.options)

    MainLoop(test_list, run_testcase, collect_result, m.options.mainloop_jobs)
    # Generate the report file
    ReportDiff(m.options.output_dir,
               m.options.old_output_dir).txt_image(m.options.report_file)
Example no. 3
    def parse_command_line(self):
        """Handle command-line parsing and internal configuration."""

        self.main.parse_args()

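        # Use a 256-color terminal formatter when colorized output was
        # requested, and a pass-through ('null') formatter otherwise.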
        self.formatter = get_formatter_by_name(
            'terminal256' if self.main.options.colorize else 'null',
            style=GNAThubOutputStyle, encoding='utf-8')

        self.testcases = Testsuite.compute_testcases_list(self.main.args)
        self.testcases = sorted(self.testcases, key=lambda s: s.lower())

        if self.main.options.discs:
            self.discs.extend(self.main.options.discs.split(','))

        setup_result_dir(self.main.options)
Example no. 4
def main():
    """Run the testsuite.
    """
    m = Main()
    add_mainloop_options(m, extended_options=True)
    add_run_test_options(m)
    m.add_option("--diffs",
                 dest="view_diffs",
                 action="store_true",
                 default=False,
                 help="show diffs on stdout")
    m.parse_args()

    # Create a tmp directory for the entire testsuite, to make sure
    # that, should the git hooks leak any files or directories, we can
    # (1) detect them, and (2) delete them.
    #
    # This requires some extra work to make sure that the scripts
    # being tested do actually use them, but this needs to be done
    # by each testcase, because we want each testcase to have its
    # own tmp directory (allowing for concurrency).  We pass that
    # information to the testcase through the GIT_HOOKS_TESTSUITE_TMP
    # environment variable.
    m.options.tmp = mkdtemp('', 'git-hooks-TS-', m.options.tmp)
    os.environ['GIT_HOOKS_TESTSUITE_TMP'] = m.options.tmp

    try:
        testcases = get_testcases(m.args)
        setup_result_dir(m.options)

        # We do not need discriminants in this testsuite at the moment.
        discs = None

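        # Metrics dictionary, filled in by the generated collect_result
        # and printed in the final summary.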
        metrics = {}
        collect_result = generate_collect_result(metrics=metrics,
                                                 options=m.options)
        run_testcase = generate_run_testcase('bin/run-testcase', discs,
                                             m.options)

        MainLoop(testcases, run_testcase, collect_result,
                 m.options.mainloop_jobs)
        print_testsuite_results_summary(metrics)
    finally:
        rm(m.options.tmp, recursive=True)
Example no. 5
def main():
    """Run the testsuite"""

    m = Main()
    add_mainloop_options(m, extended_options=True)
    add_run_test_options(m)
    m.add_option("--diffs",
                 dest="view_diffs",
                 action="store_true",
                 default=False,
                 help="show diffs on stdout")
    m.parse_args()

    # Various files needed or created by the testsuite.
    # This creates:
    #   the output directory ("out" by default)
    #   the report file
    #   the results file

    setup_result_dir(m.options)

    if m.args:
        test_list = [t.strip('/') for t in m.args]
    else:
        test_list = sorted(glob('tests/*'))

    env = Env()

    # add support module path
    python_lib = os.path.join(os.getcwd(), 'lib', 'python')
    Env().add_search_path("PYTHONPATH", python_lib)

    env.add_search_path('PYTHONPATH', os.getcwd())
    discs = [env.target.platform]

    if m.options.discs:
        discs += m.options.discs.split(',')

    test_metrics = {'total': len(test_list), 'uok': 0, 'invalid': 0}

    # Generate a standard 'collect_result' function...
    generated_collect_result = generate_collect_result(
        result_dir=m.options.output_dir,
        results_file=m.options.results_file,
        output_diff=m.options.view_diffs,
        metrics=test_metrics)

    # ... and then wrap that generated 'collect_result' function in something
    # that will also accumulate 'UOK' test results and failed tests
    def collect_test_metrics(name, process, _job_info):
        generated_collect_result(name, process, _job_info)
        test_name = os.path.basename(name)
        test_result = split_file(m.options.output_dir + '/' + test_name +
                                 '.result',
                                 ignore_errors=True)
        if test_result:
            test_status = test_result[0].split(':')[0]
            if test_status == 'UOK':
                test_metrics['uok'] += 1
            elif test_status == 'INVALID_TEST':
                test_metrics['invalid'] += 1

    run_testcase = generate_run_testcase('run-test', discs, m.options)

    MainLoop(test_list, run_testcase, collect_test_metrics,
             m.options.mainloop_jobs)

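    # 'run', 'failed' and 'crashed' are expected to be filled in by the
    # generated collect_result; 'uok' and 'invalid' by the wrapper above.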
    print "Summary: Ran %(run)s/%(total)s tests, with %(failed)s failed, %(crashed)s crashed, %(uok)s unexpectedly passed, %(invalid)s invalid." % test_metrics

    # Generate the report file
    ReportDiff(m.options.output_dir,
               m.options.old_output_dir).txt_image(m.options.report_file)

    if (test_metrics['failed'] > 0 or test_metrics['crashed'] > 0
            or test_metrics['uok'] > 0 or test_metrics['invalid'] > 0):
        sys.exit(1)
Example no. 6
    def __init__(self, options):
        """Fill the test lists"""

        # Various files needed or created by the testsuite
        setup_result_dir(options)
        self.options = options

        # Always add ALL and target info
        self.discs = ['ALL'] + Env().discriminants
        if Env().target.os.name == 'vxworks6':
            self.discs.append('vxworks')

        if options.discs:
            self.discs += options.discs.split(',')

        if options.with_gdb:
            # Serialize runs and disable gprof
            options.mainloop_jobs = 1
            options.with_gprof = False

        # Read discriminants from testsuite.tags
        # The file testsuite.tags should have been generated by
        # AWS 'make setup'
        try:
            with open('testsuite.tags') as tags_file:
                self.discs += tags_file.read().strip().split()
        except IOError:
            sys.exit("Cannot find testsuite.tags. Please run make setup")

        if options.from_build_dir:
            os.environ["ADA_PROJECT_PATH"] = os.getcwd()
            # Read makefile.setup to set proper build environment
            c = MakeVar('../makefile.setup')
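            # Assumption: c.get(VAR, value, if_equal, otherwise) returns
            # if_equal when VAR equals value in makefile.setup, and
            # otherwise when it does not.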
            os.environ["PRJ_BUILD"] = c.get("DEBUG", "true", "Debug",
                                            "Release")
            os.environ["PRJ_XMLADA"] = c.get("XMLADA", "true", "Installed",
                                             "Disabled")
            os.environ["PRJ_ASIS"] = c.get("ASIS", "true", "Installed",
                                           "Disabled")
            os.environ["PRJ_LDAP"] = c.get("LDAP", "true", "Installed",
                                           "Disabled")
            os.environ["PRJ_SOCKLIB"] = c.get("IPv6", "true", "IPv6", "GNAT")
            os.environ["SOCKET"] = c.get("SOCKET")
            os.environ["LIBRARY_TYPE"] = "static"
            # from-build-dir only supported on native platforms
            os.environ["PLATFORM"] = "native"
            # Add the current tools in front of PATH
            os.environ["PATH"] = os.getcwd() + os.sep + ".." + os.sep \
                + ".build" + os.sep + os.environ["PLATFORM"] \
                + os.sep + os.environ["PRJ_BUILD"].lower() \
                + os.sep + "static" + os.sep + "tools" \
                + os.pathsep + os.environ["PATH"]

        logging.debug(
            "Running the testsuite with the following discriminants: %s" %
            ", ".join(self.discs))

        # Add current directory in PYTHONPATH (to find test_support.py)
        Env().add_search_path('PYTHONPATH', os.getcwd())
        os.environ["TEST_CONFIG"] = os.path.join(os.getcwd(), 'env.dump')

        Env().testsuite_config = options
        Env().store(os.environ["TEST_CONFIG"])

        # Save discriminants
        with open(options.output_dir + "/discs", "w") as discs_f:
            discs_f.write(" ".join(self.discs))
Example no. 7
def run_testsuite(test_driver):
    """Run the testsuite

    PARAMETERS
      test_driver: path to the test driver (e.g. lib/python/run-test)
    """
    options = __parse_options()
    env = Env()

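    # Forward the relevant options to the individual tests through
    # environment variables.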
    if options.vc_timeout:
        os.environ["vc_timeout"] = str(options.vc_timeout)
    if options.debug:
        os.environ["debug"] = "true"
    if options.verbose:
        os.environ["verbose"] = "true"
    if options.inverse_prover:
        os.environ["inverse_prover"] = "true"
    if options.benchmarks:
        os.environ["benchmarks"] = "true"
    if options.cache:
        os.environ["cache"] = "true"

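    # Build the list of tests to run, depending on how the tests were
    # selected on the command line.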
    if options.test_list:
        with open(options.test_list, 'r') as f:
            test_list = [os.path.join("tests", line.strip())
                         for line in f.readlines()]
            test_list = [t for t in test_list if os.path.isdir(t)]
    elif options.exact_name:
        test_name = os.path.join('tests/', options.run_test)
        if os.path.isdir(test_name):
            test_list = [test_name]
        else:
            print("error: test '" + options.run_test + "' not found")
            sys.exit(1)
    elif options.pattern:
        test_list = filter_list('tests/*')
        reg = re.compile(options.pattern)
        test_list = [
            test for test in test_list if test_contains_pattern(test, reg)
        ]
    else:
        test_list = [
            t for t in filter_list('tests/*', options.run_test)
            if os.path.isdir(t)
        ]

    # Various files needed or created by the testsuite
    setup_result_dir(options)

    discs = env.discriminants

    if options.discs:
        discs += options.discs

    run_testcase = generate_run_testcase(test_driver, discs, options)
    collect_result = generate_collect_result(options.output_dir,
                                             options.results_file,
                                             options.view_diffs)

    MainLoop(test_list, run_testcase, collect_result, options.mainloop_jobs)

    # Write report
    with open(options.output_dir + '/discs', 'w') as discs_f:
        discs_f.write(" ".join(discs))
    ReportDiff(options.output_dir,
               options.old_output_dir).txt_image(options.report_file)
Example no. 8
    def __init__(self, options):
        """Fill the test lists"""

        # Various files needed or created by the testsuite
        setup_result_dir(options)
        self.options = options

        # Always add ALL and target info
        self.discs = ['ALL'] + Env().discriminants
        if Env().target.os.name == 'vxworks6':
            self.discs.append('vxworks')

        if options.discs:
            self.discs += options.discs.split(',')

        if options.with_gdb:
            # Serialize runs and disable gprof
            options.mainloop_jobs = 1
            options.with_gprof = False

        # Read discriminants from testsuite.tags
        # The file testsuite.tags should have been generated by
        # AWS 'make setup'
        try:
            with open('testsuite.tags') as tags_file:
                self.discs += tags_file.read().strip().split()
        except IOError:
            sys.exit("Cannot find testsuite.tags. Please run make setup")

        if options.from_build_dir:
            os.environ["ADA_PROJECT_PATH"] = os.getcwd()
            # Read makefile.setup to set proper build environment
            c = MakeVar('../makefile.setup')
            os.environ["PRJ_BUILD"] = c.get(
                "DEBUG", "true", "Debug", "Release")
            os.environ["PRJ_XMLADA"] = c.get(
                "XMLADA", "true", "Installed", "Disabled")
            os.environ["PRJ_ASIS"] = c.get(
                "ASIS", "true", "Installed", "Disabled")
            os.environ["PRJ_LDAP"] = c.get(
                "LDAP", "true", "Installed", "Disabled")
            os.environ["PRJ_SOCKLIB"] = c.get(
                "IPv6", "true", "IPv6", "GNAT")
            os.environ["SOCKET"] = c.get("SOCKET")
            os.environ["LIBRARY_TYPE"] = "static"
            # from-build-dir only supported on native platforms
            os.environ["PLATFORM"] = "native"
            # Add the current tools in front of PATH
            os.environ["PATH"] = os.getcwd() + os.sep + ".." + os.sep \
                + ".build" + os.sep + os.environ["PLATFORM"] \
                + os.sep + os.environ["PRJ_BUILD"].lower() \
                + os.sep + "static" + os.sep + "tools" \
                + os.pathsep + os.environ["PATH"]

        logging.debug(
            "Running the testsuite with the following discriminants: %s"
            % ", ".join(self.discs))

        # Add current directory in PYTHONPATH (to find test_support.py)
        Env().add_search_path('PYTHONPATH', os.getcwd())
        os.environ["TEST_CONFIG"] = os.path.join(os.getcwd(), 'env.dump')

        Env().testsuite_config = options
        Env().store(os.environ["TEST_CONFIG"])

        # Save discriminants
        with open(options.output_dir + "/discs", "w") as discs_f:
            discs_f.write(" ".join(self.discs))