Example #1
    def tear_up(self):
        bin_dir = 'bin_dev'
        os.environ['SHELL'] = os.path.join(self.main.options.with_gsh, bin_dir,
                                           'gsh' + Env().build.os.exeext)

        Env().add_path(os.path.join(self.main.options.with_gsh, 'bin'))
        Env().add_path(os.path.dirname(os.environ['SHELL']))
Example #2
    def parse_args(self, args=None):
        """Parse options and set console logger.

        :param args: the list of positional parameters. If None then
            ``sys.argv[1:]`` is used
        :type args: list[str] | None
        """
        levels = {
            'RAW': gnatpython.logging_util.RAW,
            'DEBUG': logging.DEBUG,
            'INFO': logging.INFO,
            'ERROR': logging.ERROR,
            'CRITICAL': logging.CRITICAL
        }

        (self.options,
         self.args) = self.__parse_proxy.parse_args(self.option_parser, args)

        if not self.__log_handlers_set:
            # First set level of verbosity
            if self.options.verbose:
                level = gnatpython.logging_util.RAW
            else:
                level = levels.get(self.options.loglevel, logging.INFO)

            # Set logging handlers
            default_format = '%(levelname)-8s %(message)s'
            handler = gnatpython.logging_util.add_handlers(
                level=level, format=default_format)[0]

            if self.formatter is not None:
                default_format = self.formatter

            if self.options.enable_color:
                handler.setFormatter(ConsoleColorFormatter(default_format))
            elif self.formatter is not None:
                handler.setFormatter(logging.Formatter(self.formatter))

            # Log to a file if necessary
            if self.options.logfile != "":
                handler = gnatpython.logging_util.add_handlers(
                    level=gnatpython.logging_util.RAW,
                    format='%(asctime)s: %(name)-24s: '
                    '%(levelname)-8s %(message)s',
                    filename=self.options.logfile)

            self.__log_handlers_set = True

        # Export options to env
        e = Env()
        e.main_options = self.options

        if hasattr(self.options, "gnatpython_main_target_options_supported"):
            # Handle --target, --host and --build options
            e.set_env(self.options.build, self.options.host,
                      self.options.target)
Example #3
    def start(self, tests, show_diffs=False, old_result_dir=None):
        """Start the testsuite"""
        # Generate the testcases list
        if tests:
            # tests parameter can be a file containing a list of tests
            if len(tests) == 1 and os.path.isfile(tests[0]):
                with open(tests[0]) as _list:
                    tests = [t.strip().split(':')[0] for t in _list]
            else:
                # user-provided list of tests; strip trailing '/' so that
                # file completion can be used
                tests = [t.rstrip('/') for t in tests]
        else:
            # Get all tests.py
            tests = [os.path.dirname(t) for t in sorted(glob('*/test.py'))]

        if not Env().testsuite_config.with_Z999:
            # Do not run Z999 test
            tests = [t for t in tests if t != 'Z999_xfail']

        test_metrics = {'total': len(tests)}

        # Run the main loop
        collect_result = generate_collect_result(
            options=self.options,
            output_diff=show_diffs,
            metrics=test_metrics)
        run_testcase = generate_run_testcase('run-test', self.discs,
                                             Env().testsuite_config)
        MainLoop(tests, run_testcase, collect_result,
                 Env().testsuite_config.mainloop_jobs)

        if self.options.retry_threshold > 0:
            # Set skip-if-ok and rerun the testsuite with mainloop_jobs set
            # to 1, to avoid parallelism problems on the tests that
            # previously failed.
            if test_metrics['failed'] < self.options.retry_threshold:
                logging.warning("%d tests have failed (threshold was %d)."
                                " Retrying..."
                                % (test_metrics['failed'],
                                   self.options.retry_threshold))

                # Regenerate collect_result function
                self.options.skip_if_ok = True
                self.options.skip_if_dead = True
                collect_result = generate_collect_result(
                    options=self.options,
                    output_diff=show_diffs,
                    metrics=test_metrics)
                MainLoop(tests, run_testcase, collect_result, 1)
            else:
                logging.error("Too many errors")

        # Write report
        ReportDiff(self.options.output_dir,
                   self.options.old_output_dir).txt_image(
                       self.options.report_file)
Example #4
def main():
    """Run the testsuite"""
    options = __parse_options()
    assert os.path.exists(makedir("bin")), \
        "cannot find %s directory" % makedir("bin")
    assert os.path.exists(makedir("rbin")), \
        "cannot find %s directory" % makedir("rbin")
    env = Env()
    env.add_search_path("PYTHONPATH", os.getcwd())

    test_list = [
        t for t in filter_list('tests/*', options.run_test) if os.path.isdir(t)
    ]

    # Various files needed or created by the testsuite
    result_dir = options.output_dir
    results_file = result_dir + '/results'

    if os.path.exists(result_dir):
        rm(result_dir, True)

    mkdir(result_dir)

    discs = env.discriminants

    if options.discs:
        discs += options.discs

    def test_build_cmd(test, _):
        """Run the given test"""
        cmd = [
            sys.executable, 'run-test', '-d', ",".join(discs), '-o',
            result_dir, '-t', options.tmp, test
        ]
        if options.verbose:
            cmd.append('-v')
        if options.host:
            cmd.append('--host=' + options.host)
        if options.target:
            cmd.append('--target=' + options.target)
        if not options.enable_cleanup:
            cmd.append('--disable-cleanup')
        return Run(cmd, bg=True)

    collect_result = generate_collect_result(result_dir, results_file,
                                             options.view_diffs)

    MainLoop(test_list, test_build_cmd, collect_result, options.mainloop_jobs)

    # Write report
    with open(result_dir + '/discs', 'w') as discs_f:
        discs_f.write(" ".join(discs))
    ReportDiff(result_dir,
               options.old_result_dir).txt_image(result_dir + '/report')
Example #5
def main():
    """Run the testsuite"""
    options = __parse_options()
    assert os.path.exists(makedir("bin")), \
        "cannot find %s directory" % makedir("bin")
    assert os.path.exists(makedir("rbin")), \
        "cannot find %s directory" % makedir("rbin")
    env = Env()
    env.add_search_path("PYTHONPATH", os.getcwd())

    test_list = [t for t in filter_list('tests/*', options.run_test)
                 if os.path.isdir(t)]

    # Various files needed or created by the testsuite
    result_dir = options.output_dir
    results_file = result_dir + '/results'

    if os.path.exists(result_dir):
        rm(result_dir, True)

    mkdir(result_dir)

    discs = env.discriminants

    if options.discs:
        discs += options.discs

    def test_build_cmd(test, _):
        """Run the given test"""
        cmd = [sys.executable, 'run-test',
               '-d', ",".join(discs),
               '-o', result_dir,
               '-t', options.tmp,
               test]
        if options.verbose:
            cmd.append('-v')
        if options.host:
            cmd.append('--host=' + options.host)
        if options.target:
            cmd.append('--target=' + options.target)
        if not options.enable_cleanup:
            cmd.append('--disable-cleanup')
        return Run(cmd, bg=True)

    collect_result = generate_collect_result(
        result_dir, results_file, options.view_diffs)

    MainLoop(test_list, test_build_cmd, collect_result, options.mainloop_jobs)

    # Write report
    with open(result_dir + '/discs', 'w') as discs_f:
        discs_f.write(" ".join(discs))
    ReportDiff(result_dir, options.old_result_dir).txt_image(
        result_dir + '/report')
Example #6
def set_python_env(prefix):
    """Set environment for a Python distribution.

    :param prefix: root directory of the python distribution
    :type prefix: str
    """
    env = Env()
    if sys.platform == 'win32':
        env.add_path(prefix)
        env.add_path(os.path.join(prefix, 'Scripts'))
    else:
        env.add_path(os.path.join(prefix, 'bin'))
        env.add_dll_path(os.path.join(prefix, 'lib'))
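
Typical usage is to point this at the root of a Python installation; for
instance (hypothetical prefix path):

# Puts <prefix>/bin on the PATH (or <prefix> and <prefix>\Scripts on
# Windows), plus the shared-library directory on Unix
set_python_env('/opt/python27')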
Example #7
def build(prj):
    """Compile a project with gprbuild"""
    cmd = ["gprbuild"]
    if Env().is_cross:
        cmd.append("--target=" + Env().target.triplet)
        if Env().target.os.name.startswith('vxworks'):
            cmd.append('-XPLATFORM=vxworks')
    cmd = cmd + ["-p", "-gnat2012", "-P" + prj, "-bargs", "-E"]
    if Env().testsuite_config.with_gprof:
        cmd = cmd + ["-cargs", "-pg", "-O2", "-largs", "-pg"]
    process = Run(cmd)
    if process.status:
        # Build failed: report the compiler output
        logging.error(process.out)
Example #8
def main():
    """Run the testsuite"""

    m = Main()
    add_mainloop_options(m, extended_options=True)
    add_run_test_options(m)
    m.add_option("--diffs",
                 dest="view_diffs",
                 action="store_true",
                 default=False,
                 help="show diffs on stdout")
    m.parse_args()

    # Various files needed or created by the testsuite.
    # This creates:
    #   the output directory (out by default)
    #   the report file
    #   the results file

    setup_result_dir(m.options)

    if m.args:
        test_list = [t.strip('/') for t in m.args]
    else:
        test_list = sorted(glob('tests/*'))

    env = Env()

    # add support module path
    python_lib = os.path.join(os.getcwd(), 'lib', 'python')
    Env().add_search_path("PYTHONPATH", python_lib)

    env.add_search_path('PYTHONPATH', os.getcwd())
    discs = [env.target.platform]

    if m.options.discs:
        discs += m.options.discs.split(',')

    collect_result = generate_collect_result(m.options.output_dir,
                                             m.options.results_file,
                                             m.options.view_diffs)

    run_testcase = generate_run_testcase(python_lib + '/run-test', discs,
                                         m.options)

    MainLoop(test_list, run_testcase, collect_result, m.options.mainloop_jobs)
    # Generate the report file
    ReportDiff(m.options.output_dir,
               m.options.old_output_dir).txt_image(m.options.report_file)
Example #9
    def SCOV_CARGS(options):
        """
        Compilation args needed by tests exercising source coverage, passed by
        default to build invocations issued through the gprbuild() API.
        """

        # Working from binary traces relies on specific properties:

        if options.trace_mode == 'bin':

            # Critical conditional branches must be preserved, source
            # coverage obligations need to be generated by the compiler,
            # and debug info is needed.

            cargs = ["-fpreserve-control-flow", "-fdump-scos", "-g"]

            # Proper support of inlining or generics requires advanced debug
            # info features possibly disabled by default on some targets.  We
            # could enforce this with an explicit option unconditionally, but
            # it is cleaner and simpler to have this exposed only when needed
            # in qualification material.

            if 'vxworks' in Env().target.triplet:
                cargs.append("-gno-strict-dwarf")

            return cargs

        else:
            return []
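
These switches would typically reach the compiler through the -cargs section
of a gprbuild command line, as in the build() example above. A sketch:

# Everything after -cargs is passed to the compiler
cmd = ['gprbuild', '-p', '-P', 'prj.gpr', '-cargs'] + SCOV_CARGS(options)
Run(cmd)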
Example #10
    def execute(self):
        """Run the testsuite and execute testcases."""

        # Add the support directory in the PYTHONPATH so that modules are
        # accessible from each test case.
        Env().add_search_path('PYTHONPATH', os.path.dirname(const.basedir))

        self.parse_command_line()

        self.testcase_runner = generate_run_testcase(
            os.path.join(BASEDIR, 'run-test'),
            self.discs, self.main.options)

        MainLoop(self.testcases,
                 self.testcase_runner,
                 self.collect_result,
                 self.main.options.mainloop_jobs)

        # Generate the report file
        diff = ReportDiff(
            self.main.options.output_dir,
            self.main.options.old_output_dir
        )
        diff.txt_image(self.main.options.report_file)
        self.log(self.format_testsuite_summary())
Example #11
def run_testsuite(test_driver):
    """Run the testsuite

    PARAMETERS
      test_driver: path to the test driver (e.g. lib/python/run-test)
    """
    options = __parse_options()
    env = Env()

    test_list = [
        t for t in filter_list('tests/*', options.run_test) if os.path.isdir(t)
    ]

    # Various files needed or created by the testsuite
    setup_result_dir(options)

    discs = env.discriminants

    if options.discs:
        discs += options.discs

    run_testcase = generate_run_testcase(test_driver, discs, options)
    collect_result = generate_collect_result(options.output_dir,
                                             options.results_file,
                                             options.view_diffs)

    MainLoop(test_list, run_testcase, collect_result, options.mainloop_jobs)

    # Write report
    with open(options.output_dir + '/discs', 'w') as discs_f:
        discs_f.write(" ".join(discs))
    ReportDiff(options.output_dir,
               options.old_output_dir).txt_image(options.report_file)
Example #12
def get_discriminants():
    """
    If GNATpython is available, use it to return the list of discriminants
    associated with the current context (target, host, etc.). Otherwise,
    return an empty list of discriminants.

    :rtype: list[str]
    """
    return Env().discriminants if Env else []
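
The "if Env else []" guard only makes sense if Env can be bound to None,
which implies a guarded import along these lines (a sketch; the actual
import is not shown in the source):

# Hypothetical guarded import: Env stays None when GNATpython is absent
try:
    from gnatpython.env import Env
except ImportError:
    Env = None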
Example #13
    def create(self, output_dir):
        mkdir(output_dir)
        executable = os.path.join(output_dir, self.name)
        if Env().build.os.name == 'windows':
            # On Windows, executables are suffixed with .exe
            executable += '.exe'
        with open(executable, 'w') as fd:
            fd.write(self._generate_code() % {
                'project_name': self.project.name,
                'project_root': self.project.install_dir
            })
            fd.write('\n')
        chmod('+x', executable)
Example #14
        def diff(self, rev1=None, rev2=None):
            """Return the local changes in the checkout.

            :param rev1: start diff from this revision
            :type rev1: int
            :param rev2: stop diff at this revision
            :type rev2: int

            :return: the diff content
            :rtype: str
            """
            try:
                if rev1 and rev2:
                    result = self.client.diff(
                        Env().tmp_dir, self.dest,
                        pysvn.Revision(rev1), pysvn.Revision(rev2))
                else:
                    result = self.client.diff(Env().tmp_dir, self.dest)
                return result
            except pysvn.ClientError as e:
                self.error("subversion diff failure: %s" % e.args[0],
                           traceback=sys.exc_traceback)
Example #15
    def get_test_list(self, sublist):
        """Retrieve the list of tests.

        The default method looks for all test.yaml files in the test
        directory. If a test.yaml has a variants field, the test is expanded
        into several tests, each associated with a given variant.

        This method may be overridden. At this stage self.global_env
        (after update by the tear_up procedure) is available.

        :param sublist: a list of tests scenarios or patterns
        :type sublist: list[str]
        :return: the list of selected tests
        :rtype: list[str]
        """
        # First retrieve the list of test.yaml files
        result = [
            os.path.relpath(p, self.test_dir).replace('\\', '/')
            for p in find(self.test_dir, 'test.yaml')
        ]
        if sublist:
            filtered_result = []
            path_selectors = [
                os.path.relpath(os.path.abspath(s),
                                self.test_dir).replace('\\', '/')
                for s in sublist
            ]
            for p in result:
                for s in path_selectors:
                    if re.match(s, p):
                        filtered_result.append(p)
                        # Stop at the first matching selector so a test is
                        # not appended twice
                        break

            result = filtered_result

        # For each of them look for a variants field
        expanded_result = []
        for test in result:
            test_env = load_with_config(os.path.join(self.test_dir, test),
                                        Env().to_dict())

            if test_env and 'variants' in test_env:
                for variant in test_env['variants']:
                    expanded_result.append("%s|%s" %
                                           (test, yaml.dump(variant)))
            else:
                expanded_result.append(test)

        return expanded_result
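
Each variant-expanded entry encodes the variant after a '|' separator, so a
decoder matching this convention could look like the following sketch (the
framework's split_variant helper, used in the test_main example below,
presumably performs a similar split):

import yaml

def split_variant(name):
    # Hypothetical inverse of the "%s|%s" encoding used above
    if '|' in name:
        test, variant = name.split('|', 1)
        return test, yaml.load(variant)
    return name, None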
Example #16
def local(cmd, config_file, args=None):
    """Run a local test

    Execute the given command and check its output for
    "END TESTS................   PASSED"; return True if found.

    PARAMETERS:
        cmd: the command to execute
        config_file: to set POLYORB_CONF
        args: list of additional parameters
    """
    args = args or []
    print "Running %s %s (config=%s)" % (cmd, " ".join(args), config_file)
    if config_file:
        assert_exists(os.path.join(CONF_DIR, config_file))
    os.environ[POLYORB_CONF] = config_file

    command = add_extension(os.path.join(BASE_DIR, cmd))
    assert_exists(command)

    p_cmd = [command] + args

    if VERBOSE:
        if config_file:
            print 'RUN: POLYORB_CONF=%s %s' % (config_file, " ".join(p_cmd))
        else:
            print 'RUN: %s' % " ".join(p_cmd)

    Run(make_run_cmd(p_cmd,
                     Env().options.coverage),
        output=OUTPUT_FILENAME + 'local',
        error=STDOUT,
        timeout=RLIMIT)
    if Env().options.coverage:
        run_coverage_analysis(command)
    return _check_output(OUTPUT_FILENAME + 'local')
Example #17
    def control_vars(self):
        """
        Return a dictionary of all values available in the control
        expressions (see the `run_control` method).

        :rtype: dict[str, object]
        """
        env = Env()
        return {
            'env': env,

            # Shortcuts to test the build OS
            'darwin': env.build.os.name == 'darwin',
            'linux': env.build.os.name == 'linux',
            'windows': env.build.os.name == 'windows',
        }
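
A control expression can then be evaluated against this dictionary, for
example (a hypothetical sketch; the expression string is illustrative and
'driver' stands for an instance of the class above):

ctx = driver.control_vars()
# Skip the test when building on Windows or for a cross target
should_skip = eval('windows or env.is_cross', {}, ctx)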
Example #18
def highlight(string, fg=COLOR_UNCHANGED, bg=COLOR_UNCHANGED):
    """Return a version of string with color highlighting applied to it.

    This is suitable for display on a console. Nothing is done if color
    has been disabled.
    """
    if not Env().main_options.enable_color:
        return string
    else:
        if bg == COLOR_UNCHANGED:
            colors = "%d" % (30 + fg, )
        elif fg == COLOR_UNCHANGED:
            # Only the background color changes in this branch
            colors = "%d" % (40 + bg, )
        else:
            colors = "%d;%d" % (40 + bg, 30 + fg)
        return "\033[%sm%s\033[m" % (colors, string)
Example #19
def altergo(src, timeout=10, opt=None):
    """Invoke alt-ergo with why3-cpulimit wrapper

    PARAMETERS
      src: VC file to process
      timeout: timeout passed to why3-cpulimit
      opt: additional command line options for alt-ergo
    """
    # add libexec/spark/bin to the PATH
    installdir = spark_install_path()
    bindir = os.path.join(installdir, 'libexec', 'spark', 'bin')
    Env().add_path(bindir)
    # run alt-ergo
    cmd = ["alt-ergo", "-steps-bound", "20000"]
    cmd += to_list(opt)
    cmd += [src]
    process = Run(cmd)
    print process.out
Example #20
    def run(self):
        # If we are requested to run with a fake toolchain, set it up now
        if self.fake_ada_target:
            fake_dir = self.working_dir('fake-ada')
            create_fake_ada_compiler(comp_dir=fake_dir,
                                     comp_target=self.fake_ada_target,
                                     gnat_version="21.0w",
                                     gcc_version="8.4.3",
                                     runtimes=["rtp"],
                                     comp_is_cross=True)
            Env().add_path(os.path.join(fake_dir, 'bin'))

        # Build the program and run it
        self.run_and_check([
            'gprbuild', '-g1', '-q', '-p', '-P', self.project_file, '-bargs',
            '-Es'
        ])
        valgrind_args = (['valgrind', '-q']
                         if self.global_env['options'].valgrind else [])
        self.run_and_check(valgrind_args +
                           [os.path.join('.', self.main_program)])
Example #21
    def __init__(self):
        self.duration = 0
        self.summary = defaultdict(lambda: 0)

        self.formatter = None
        self.testcases = None

        self.testcase_runner = None

        self.env = Env()
        self.discs = [self.env.target.platform]

        self.main = Main()
        add_mainloop_options(self.main, extended_options=True)
        add_run_test_options(self.main)

        self.main.add_option('--with-diff', action='store_true', default=False,
                             help='show diffs on stdout')

        self.main.add_option('--colorize', action='store_true',
                             default=False, help=argparse.SUPPRESS)
Example #22
def run(bin, options=None, output_file=None):
    """Run a test"""
    if options is None:
        options = []
    if "TIMEOUT" in os.environ:
        timeout = int(os.environ["TIMEOUT"])
    else:
        timeout = 300

    if Env().is_cross:
        # Import gnatpython excross module only when needed
        from gnatpython.internal.excross import run_cross
        run_cross([bin + Env().target.os.exeext],
                  output=output_file,
                  timeout=timeout,
                  copy_files_on_target=[
                      '*.txt', '*.gz', '*.dat', '*.tmplt', '*.thtml', '*.html',
                      '*.ini', '*.types', '*.mime', '*.gif', '*.png'
                  ])
    else:
        if Env().testsuite_config.with_gdb:
            Run(["gdb", "--eval-command=run", "--batch-silent", "--args", bin]
                + options,
                output=output_file,
                timeout=timeout)
        elif Env().testsuite_config.with_valgrind:
            Run(["valgrind", "-q", "./" + bin] + options,
                output=output_file,
                timeout=timeout)
        else:
            Run(["./" + bin] + options, output=output_file, timeout=timeout)

    if Env().testsuite_config.with_gprof:
        Run(["gprof", bin] + options,
            output=os.path.join(Env().PROFILES_DIR,
                                "%s_%s_gprof.out" % (TEST_NAME, bin)))
Example #23
    def COMMON_CARGS():
        return (["-g", "-fpreserve-control-flow", "-fdump-scos"] +
                BUILDER.__TARGET_CARGS_FOR(Env().target.triplet))
Example #24
    def testsuite_main(self):
        """Main for the main testsuite script."""
        self.main = Main(add_targets_options=self.CROSS_SUPPORT)

        # Add common options
        add_mainloop_options(self.main)
        self.main.add_option("-o",
                             "--output-dir",
                             metavar="DIR",
                             default="./out",
                             help="select output dir")
        self.main.add_option("-t",
                             "--temp-dir",
                             metavar="DIR",
                             default=Env().tmp_dir)
        self.main.add_option(
            "--max-consecutive-failures",
            default=0,
            help="If there are more than N consecutive failures, the testsuite"
            " is aborted. If set to 0 (default) then the testsuite will never"
            " be stopped")
        self.main.add_option(
            "--keep-old-output-dir",
            default=False,
            action="store_true",
            help="This is default with this testsuite framework. The option"
            " is kept only to keep backward compatibility of invocation with"
            " former framework (gnatpython.testdriver)")
        self.main.add_option("--disable-cleanup",
                             dest="enable_cleanup",
                             action="store_false",
                             default=True,
                             help="disable cleanup of working space")
        self.main.add_option(
            "--show-error-output",
            action="store_true",
            help="When testcases fail, display their output. This is for"
            " convenience for interactive use.")
        self.main.add_option(
            "--dump-environ",
            dest="dump_environ",
            action="store_true",
            default=False,
            help="Dump all environment variables in a file named environ.sh,"
            " located in the output directory (see --output-dir). This"
            " file can then be sourced from a Bourne shell to recreate"
            " the environement that existed when this testsuite was run"
            " to produce a given testsuite report.")

        # Add user defined options
        self.add_options()

        # parse options
        self.main.parse_args()

        # At this stage compute commonly used paths
        # Keep the working dir as short as possible, to avoid the risk
        # of having a path that's too long (a problem often seen on
        # Windows, or when using WRS tools that have their own max path
        # limitations).
        # Note that we do make sure that working_dir is an absolute
        # path, as we are likely to be changing directories when
        # running each test. A relative path would no longer work
        # under those circumstances.
        d = os.path.abspath(self.main.options.output_dir)
        self.output_dir = os.path.join(d, 'new')
        self.old_output_dir = os.path.join(d, 'old')

        if not os.path.isdir(self.main.options.temp_dir):
            logging.critical("temp dir '%s' does not exist",
                             self.main.options.temp_dir)
            sys.exit(1)

        self.working_dir = tempfile.mkdtemp(
            '', 'tmp', os.path.abspath(self.main.options.temp_dir))

        # Create the new output directory that will hold the results
        self.setup_result_dir()

        # Store in global env: target information and common paths
        self.global_env['build'] = Env().build
        self.global_env['host'] = Env().host
        self.global_env['target'] = Env().target
        self.global_env['output_dir'] = self.output_dir
        self.global_env['working_dir'] = self.working_dir
        self.global_env['options'] = self.main.options

        # User specific startup
        self.tear_up()

        # Retrieve the list of tests
        self.test_list = self.get_test_list(self.main.args)

        # Dump global_env so that it can be used by test runners
        with open(os.path.join(self.output_dir, 'global_env.yaml'),
                  'wb') as fd:
            fd.write(yaml.dump(self.global_env))

        # Launch the mainloop
        self.total_test = len(self.test_list)
        self.run_test = 0

        MainLoop(self.test_list, self.launch_test, self.collect_result)

        self.dump_testsuite_result()

        # Clean everything
        self.tear_down()
Example #25
    def test_main(self):
        """Main function for the script in charge of running a single test.

        The script expects two parameters on the command line:

        * the output dir in which the results of the tests are saved
        * the path to the test.yaml file relative to the tests directory
        """
        self.output_dir = sys.argv[1]
        self.test_case_file, self.test_variant = \
            self.split_variant(sys.argv[2])

        logging.getLogger('').setLevel(RAW)
        add_handlers(
            level=RAW,
            format='%(asctime)s: %(name)-24s: '
            '%(levelname)-8s %(message)s',
            filename=os.path.join(
                self.output_dir,
                self.test_name(self.test_case_file, self.test_variant) +
                '.log'))

        with open(os.path.join(self.output_dir, 'global_env.yaml'),
                  'rb') as fd:
            self.global_env = yaml.load(fd.read())

        # Set target information
        Env().build = self.global_env['build']
        Env().host = self.global_env['host']
        Env().target = self.global_env['target']

        # Load testcase file
        self.test_env = load_with_config(
            os.path.join(self.test_dir, self.test_case_file),
            Env().to_dict())

        # Ensure that test_env acts like a dictionary
        if not isinstance(self.test_env, collections.Mapping):
            self.test_env = {
                'test_name': self.test_name(self.test_case_file,
                                            self.test_variant),
                'test_yaml_wrong_content': self.test_env
            }
            logger.error("abort test because of invalid test.yaml")
            self.dump_test_result(status="PROBLEM", msg="invalid test.yaml")
            return

        # Add to the test environment the directory in which the test.yaml is
        # stored
        self.test_env['test_dir'] = os.path.join(
            self.global_env['test_dir'], os.path.dirname(self.test_case_file))
        self.test_env['test_case_file'] = self.test_case_file
        self.test_env['test_variant'] = self.test_variant
        self.test_env['test_name'] = self.test_name(self.test_case_file,
                                                    self.test_variant)

        if 'driver' in self.test_env:
            driver = self.test_env['driver']
        else:
            driver = self.default_driver

        logger.debug('set driver to %s' % driver)
        if driver not in self.DRIVERS or \
                not issubclass(self.DRIVERS[driver], TestDriver):
            self.dump_test_result(status="PROBLEM", msg="cannot set driver")
            return

        try:
            instance = self.DRIVERS[driver](self.global_env, self.test_env)
        except Exception as e:
            error_msg = str(e)
            error_msg += "Traceback:\n"
            error_msg += "\n".join(traceback.format_tb(sys.exc_traceback))
            logger.error(error_msg)
            self.dump_test_result(status="PROBLEM",
                                  msg="exception during driver loading: %s" %
                                  str(e).split('\n')[0])
            return

        try:
            instance.tear_up()
            if instance.result.status == 'UNKNOWN':
                instance.run()
            if instance.result.status == 'UNKNOWN':
                instance.analyze()
        except Exception as e:
            error_msg = str(e)
            error_msg += "Traceback:\n"
            error_msg += "\n".join(traceback.format_tb(sys.exc_traceback))
            logger.error(error_msg)
            instance.result.set_status("PROBLEM",
                                       "exception: %s" % str(e).split('\n')[0])

        instance.tear_down()

        self.dump_test_result(instance.result)
Example #26
    def __init__(self,
                 item_list,
                 run_testcase,
                 collect_result,
                 parallelism=None,
                 abort_file=None,
                 dyn_poll_interval=True):
        """Launch loop.

        :param item_list: a list of jobs or a DAG
        :param run_testcase: a function that takes a job as argument and
            returns the spawned process (an ex.Run object). Its prototype
            should be func(name, job_info), with name the job identifier and
            job_info the related information, passed as a tuple
            (slot_number, job_retry). Note that to take advantage of
            parallelism, the spawned process should be launched in the
            background (i.e. with bg=True when using ex.Run).
            If run_testcase returns SKIP_EXECUTION instead of an ex.Run
            object, the mainloop calls collect_result directly, without
            waiting.
        :param collect_result: a function called when a job is finished. Its
            prototype should be func(name, process, job_info). If
            collect_result raises NeedRequeue, the test is requeued.
            job_info is a tuple: (slot_number, job_nb_retry)
        :param parallelism: number of workers
        :type parallelism: int | None
        :param abort_file: If specified, the loop will abort if the file is
            present
        :type abort_file: str | None
        :param dyn_poll_interval: If True the interval between each polling
            iteration is automatically updated. Otherwise it's set to 0.1
            seconds.
        :type dyn_poll_interval: bool
        """
        e = Env()
        self.parallelism = e.get_attr("main_options.mainloop_jobs",
                                      default_value=1,
                                      forced_value=parallelism)
        self.abort_file = e.get_attr("main_options.mainloop_abort_file",
                                     default_value=None,
                                     forced_value=abort_file)

        if self.parallelism == 0:
            if e.build.cpu.cores != UNKNOWN:
                self.parallelism = e.build.cpu.cores
            else:
                self.parallelism = 1

        logger.debug("start main loop with %d workers (abort on %s)" %
                     (self.parallelism, self.abort_file))
        self.workers = [None] * self.parallelism
        self.locked_items = [None] * self.parallelism

        if not isinstance(item_list, DAG):
            self.item_list = DAG(item_list)
        else:
            self.item_list = item_list

        self.iterator = self.item_list.__iter__()
        self.collect_result = collect_result
        active_workers = 0
        max_active_workers = self.parallelism
        poll_sleep = 0.1
        no_free_item = False

        try:
            while True:
                # Check for abortion
                if self.abort_file is not None and \
                        os.path.isfile(self.abort_file):
                    logger.info('Aborting: file %s has been found' %
                                self.abort_file)
                    self.abort()
                    return  # Exit the loop

                # Find free workers
                for slot, worker in enumerate(self.workers):
                    if worker is None:
                        # a worker slot is free so use it for next job
                        next_id, next_job = self.iterator.next()
                        if next_job is None:
                            no_free_item = True
                            break
                        else:
                            self.locked_items[slot] = next_id
                            self.workers[slot] = Worker(
                                next_job, run_testcase, collect_result, slot)
                            active_workers += 1

                poll_counter = 0
                logger.debug('Wait for free worker')
                while active_workers >= max_active_workers or no_free_item:
                    # All workers are busy, so wait for one to finish
                    poll_counter += 1
                    for slot, worker in enumerate(self.workers):
                        if worker is None:
                            continue

                        # Check whether the worker is still active or has
                        # more jobs pending
                        if not (worker.poll() or worker.execute_next()):
                            # If not, free the worker slot
                            active_workers -= 1
                            self.workers[slot] = None
                            self.item_list.release(self.locked_items[slot])
                            no_free_item = False
                            self.locked_items[slot] = None

                    sleep(poll_sleep)

                if dyn_poll_interval:
                    poll_sleep = compute_next_dyn_poll(poll_counter,
                                                       poll_sleep)

        except (StopIteration, KeyboardInterrupt) as e:
            if e.__class__ == KeyboardInterrupt:
                # Got ^C, abort the mainloop
                logger.error("User interrupt")

            # All the tests are finished
            while active_workers > 0:
                for slot, worker in enumerate(self.workers):
                    if worker is None:
                        continue

                    # Check whether the worker is still active, ignoring any
                    # pending jobs
                    try:
                        still_running = worker.poll()
                    except TooManyErrors:
                        still_running = False
                        # We are not spawning more tests, so we can safely
                        # ignore all TooManyErrors exceptions.
                    if not still_running:
                        active_workers -= 1
                        self.workers[slot] = None
                    sleep(0.1)

            if e.__class__ == KeyboardInterrupt:
                self.abort()
                raise

        except TooManyErrors:
            # too many tests failure, abort the testsuite
            logger.error("Too many errors, aborting")
            self.abort()
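
A minimal sketch of driving this loop, using the conventions from the
examples above (test names and command are illustrative):

def run_testcase(name, job_info):
    # Spawn in the background (bg=True) so that jobs run in parallel
    return Run([sys.executable, 'run-test', name], bg=True)

def collect_result(name, process, job_info):
    print "%s finished with status %s" % (name, process.status)

MainLoop(['tests/t1', 'tests/t2'], run_testcase, collect_result,
         parallelism=2)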
Example #27
def main():
    """Run the testsuite"""

    m = Main()
    add_mainloop_options(m, extended_options=True)
    add_run_test_options(m)
    m.add_option("--diffs",
                 dest="view_diffs",
                 action="store_true",
                 default=False,
                 help="show diffs on stdout")
    m.parse_args()

    # Various files needed or created by the testsuite.
    # This creates:
    #   the output directory (out by default)
    #   the report file
    #   the results file

    setup_result_dir(m.options)

    if m.args:
        test_list = [t.strip('/') for t in m.args]
    else:
        test_list = sorted(glob('tests/*'))

    env = Env()

    # add support module path
    python_lib = os.path.join(os.getcwd(), 'lib', 'python')
    Env().add_search_path("PYTHONPATH", python_lib)

    env.add_search_path('PYTHONPATH', os.getcwd())
    discs = [env.target.platform]

    if m.options.discs:
        discs += m.options.discs.split(',')

    test_metrics = {'total': len(test_list), 'uok': 0, 'invalid': 0}

    # Generate a standard 'collect_result' function...
    generated_collect_result = generate_collect_result(
        result_dir=m.options.output_dir,
        results_file=m.options.results_file,
        output_diff=m.options.view_diffs,
        metrics=test_metrics)

    # ... and then wrap that generated 'collect_result' function in something
    # that will also accumulate 'UOK' test results and failed tests
    def collect_test_metrics(name, process, _job_info):
        generated_collect_result(name, process, _job_info)
        test_name = os.path.basename(name)
        test_result = split_file(m.options.output_dir + '/' + test_name +
                                 '.result',
                                 ignore_errors=True)
        if test_result:
            test_status = test_result[0].split(':')[0]
            if test_status == 'UOK':
                test_metrics['uok'] += 1
            elif test_status == 'INVALID_TEST':
                test_metrics['invalid'] += 1

    run_testcase = generate_run_testcase('run-test', discs, m.options)

    MainLoop(test_list, run_testcase, collect_test_metrics,
             m.options.mainloop_jobs)

    print "Summary: Ran %(run)s/%(total)s tests, with %(failed)s failed, %(crashed)s crashed, %(uok)s unexpectedly passed, %(invalid)s invalid." % test_metrics

    # Generate the report file
    ReportDiff(m.options.output_dir,
               m.options.old_output_dir).txt_image(m.options.report_file)

    if (test_metrics['failed'] > 0 or test_metrics['crashed'] > 0
            or test_metrics['uok'] > 0 or test_metrics['invalid'] > 0):
        sys.exit(1)
Example #28
"""
Testsuite control
"""

import os.path
import re

from gnatpython.env import Env
from gnatpython.ex import Run
from gnatpython.fileutils import rm

from SUITE.cutils import no_ext

env = Env()


def xcov_pgm(auto_arch, for_target=True):
    """Return the name of the "gnatcov" program to run.

    :param bool auto_arch: If True, autodetect which "gnatcov" to run
        depending on for_target.

    :param bool for_target: If True, consider that we run "gnatcov" for the
        target architecture. Otherwise, consider that we run it for the
        build architecture instead.
    """
    arch = env.target if for_target else env.build
    return 'gnatcov{bits}{ext}'.format(
        bits=str(arch.cpu.bits) if auto_arch else '', ext=env.host.os.exeext)
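
For instance (illustrative values; the result depends on the configured
target and host):

xcov_pgm(auto_arch=True)     # e.g. 'gnatcov32' on a 32-bit target
xcov_pgm(auto_arch=False)    # 'gnatcov' ('gnatcov.exe' on a Windows host)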

Example #29
    def __init__(self, options):
        """Fill the test lists"""

        # Various files needed or created by the testsuite
        setup_result_dir(options)
        self.options = options

        # Always add ALL and target info
        self.discs = ['ALL'] + Env().discriminants
        if Env().target.os.name == 'vxworks6':
            self.discs.append('vxworks')

        if options.discs:
            self.discs += options.discs.split(',')

        if options.with_gdb:
            # Serialize runs and disable gprof
            options.mainloop_jobs = 1
            options.with_gprof = False

        # Read discriminants from testsuite.tags
        # The file testsuite.tags should have been generated by
        # AWS 'make setup'
        try:
            with open('testsuite.tags') as tags_file:
                self.discs += tags_file.read().strip().split()
        except IOError:
            sys.exit("Cannot find testsuite.tags. Please run make setup")

        if options.from_build_dir:
            os.environ["ADA_PROJECT_PATH"] = os.getcwd()
            # Read makefile.setup to set proper build environment
            c = MakeVar('../makefile.setup')
            os.environ["PRJ_BUILD"] = c.get("DEBUG", "true", "Debug",
                                            "Release")
            os.environ["PRJ_XMLADA"] = c.get("XMLADA", "true", "Installed",
                                             "Disabled")
            os.environ["PRJ_ASIS"] = c.get("ASIS", "true", "Installed",
                                           "Disabled")
            os.environ["PRJ_LDAP"] = c.get("LDAP", "true", "Installed",
                                           "Disabled")
            os.environ["PRJ_SOCKLIB"] = c.get("IPv6", "true", "IPv6", "GNAT")
            os.environ["SOCKET"] = c.get("SOCKET")
            os.environ["LIBRARY_TYPE"] = "static"
            # from-build-dir only supported on native platforms
            os.environ["PLATFORM"] = "native"
            # Add the current tools in front of PATH
            os.environ["PATH"] = os.getcwd() + os.sep + ".." + os.sep \
                + ".build" + os.sep + os.environ["PLATFORM"] \
                + os.sep + os.environ["PRJ_BUILD"].lower() \
                + os.sep + "static" + os.sep + "tools" \
                + os.pathsep + os.environ["PATH"]

        logging.debug(
            "Running the testsuite with the following discriminants: %s" %
            ", ".join(self.discs))

        # Add current directory in PYTHONPATH (to find test_support.py)
        Env().add_search_path('PYTHONPATH', os.getcwd())
        os.environ["TEST_CONFIG"] = os.path.join(os.getcwd(), 'env.dump')

        Env().testsuite_config = options
        Env().store(os.environ["TEST_CONFIG"])

        # Save discriminants
        with open(options.output_dir + "/discs", "w") as discs_f:
            discs_f.write(" ".join(self.discs))
Example #30
from gnatpython.env import Env
from test_support import Run, spark_install_path
import os.path

installdir = spark_install_path()
bindir = os.path.join(installdir, 'libexec', 'spark', 'bin')
Env().add_path(bindir)
process = Run(["gnatwhy3", "--show-config"])
print process.out
Example #31
def run_testsuite(test_driver):
    """Run the testsuite

    PARAMETERS
      test_driver: path to the test driver (e.g. lib/python/run-test)
    """
    options = __parse_options()
    env = Env()

    if options.vc_timeout:
        os.environ["vc_timeout"] = str(options.vc_timeout)
    if options.debug:
        os.environ["debug"] = "true"
    if options.verbose:
        os.environ["verbose"] = "true"
    if options.inverse_prover:
        os.environ["inverse_prover"] = "true"
    if options.benchmarks:
        os.environ["benchmarks"] = "true"
    if options.cache:
        os.environ["cache"] = "true"

    if options.test_list:
        with open(options.test_list, 'r') as f:
            test_list = [os.path.join("tests", s.strip())
                         for s in f.readlines()]
            test_list = [t for t in test_list if os.path.isdir(t)]
    elif options.exact_name:
        test_name = os.path.join('tests/', options.run_test)
        if os.path.isdir(test_name):
            test_list = [test_name]
        else:
            print 'error: test \'' + options.run_test + '\' not found'
            exit(1)
    elif options.pattern:
        test_list = filter_list('tests/*')
        reg = re.compile(options.pattern)
        test_list = [
            test for test in test_list if test_contains_pattern(test, reg)
        ]
    else:
        test_list = [
            t for t in filter_list('tests/*', options.run_test)
            if os.path.isdir(t)
        ]

    # Various files needed or created by the testsuite
    setup_result_dir(options)

    discs = env.discriminants

    if options.discs:
        discs += options.discs

    run_testcase = generate_run_testcase(test_driver, discs, options)
    collect_result = generate_collect_result(options.output_dir,
                                             options.results_file,
                                             options.view_diffs)

    MainLoop(test_list, run_testcase, collect_result, options.mainloop_jobs)

    # Write report
    with open(options.output_dir + '/discs', 'w') as discs_f:
        discs_f.write(" ".join(discs))
    ReportDiff(options.output_dir,
               options.old_output_dir).txt_image(options.report_file)