Example #1
    def cleanup(self, force=False):
        """Remove generated files."""
        rm(self.result_prefix + '.result')
        rm(self.result_prefix + '.out')
        rm(self.result_prefix + '.expected')
        rm(self.result_prefix + '.diff')
        rm(self.result_prefix + '.log')
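Since the five calls differ only in their suffix, the same cleanup can be written as a loop. A minimal refactor sketch, assuming (as the unconditional calls above suggest) that gnatpython's rm tolerates missing files:

    def cleanup(self, force=False):
        """Remove generated files."""
        # One rm call per generated suffix; rm is assumed to ignore
        # files that do not exist.
        for suffix in ('.result', '.out', '.expected', '.diff', '.log'):
            rm(self.result_prefix + suffix)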
Example #2
    def cleanup(self, project):
        """Cleanup possible remnants of previous builds."""

        Run([GPRCLEAN, "-P%s" % project] + self.gprconfoptions +
            self.gprvaroptions)
        rm('*.xcov')
        rm('*.bin')
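The '*.xcov' and '*.bin' arguments suggest that rm expands glob patterns itself. A hypothetical standard-library equivalent of rm('*.xcov'), for comparison:

import glob
import os

# Expand the pattern, then unlink each match; a pattern with no
# matches is simply a no-op.
for path in glob.glob('*.xcov'):
    os.remove(path)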
Example #3
def remove(path):
    """Delete the file or directory subtree designated by PATH"""

    print "from : %s" % os.getcwd()
    print "remove : %s" % path

    # To prevent big damage if the input PATH argument happens to have been
    # miscomputed, we first attempt to move it locally, then remove the local
    # instance. The absence of computation on this local name makes it a tad
    # safer to manipulate and the attempt to move locally would fail for
    # obviously bogus arguments such as anything leading to a parent of the
    # current dir (e.g. "/", or ...).

    local_name = "./old_stuff_to_be_removed"

    # Note that what we have to remove may be a regular file or an entire
    # directory subtree, and that rm(..., recursive=True) is not guaranteed
    # to work for regular files.

    # Start by removing the current local instance, in case the previous
    # removal failed or was interrupted somehow.

    def isdir(path):
        return os.path.isdir(path) and not os.path.islink(path)

    if os.path.exists(local_name):
        rm(local_name, recursive=isdir(local_name))

    if os.path.exists(path):
        mv(path, local_name)
        rm(local_name, recursive=isdir(local_name))
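The same move-then-remove safety idiom can be expressed with the standard library alone. A minimal sketch; the function and helper names here are hypothetical:

import os
import shutil

def _wipe(name):
    # Remove a regular file or symlink with os.remove, a directory
    # subtree with shutil.rmtree.
    if os.path.isdir(name) and not os.path.islink(name):
        shutil.rmtree(name)
    else:
        os.remove(name)

def safe_remove(path, local_name="./old_stuff_to_be_removed"):
    # Moving into the current directory first means an obviously bogus
    # PATH (e.g. "/") makes the move fail instead of deleting it.
    if os.path.exists(local_name):
        _wipe(local_name)
    if os.path.exists(path):
        shutil.move(path, local_name)
        _wipe(local_name)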
Example #4
    def tear_down(self):
        """Execute operation when finalizing the testsuite.

        By default, clean the working directory in which the tests
        were run.
        """
        if self.main.options.enable_cleanup:
            rm(self.working_dir, True)
Example #5
    def clean(self):
        """Clean up working space.

        Clean any temporary files
        """
        # Clean up before exiting
        if self.keep_test_dir_on_failure:
            mv(self.work_dir, self.failed_bin_path)
        else:
            rm(self.work_dir, True)
Example #6
def main():
    """Run the testsuite"""
    options = __parse_options()
    assert os.path.exists(makedir("bin")), \
        "cannot find %s directory" % makedir("bin")
    assert os.path.exists(makedir("rbin")), \
        "cannot find %s directory" % makedir("rbin")
    env = Env()
    env.add_search_path("PYTHONPATH", os.getcwd())

    test_list = [t for t in filter_list('tests/*', options.run_test)
                 if os.path.isdir(t)]

    # Various files needed or created by the testsuite
    result_dir = options.output_dir
    results_file = result_dir + '/results'

    if os.path.exists(result_dir):
        rm(result_dir, True)

    mkdir(result_dir)

    discs = env.discriminants

    if options.discs:
        discs += options.discs

    def test_build_cmd(test, _):
        """Run the given test"""
        cmd = [sys.executable, 'run-test',
               '-d', ",".join(discs),
               '-o', result_dir,
               '-t', options.tmp,
               test]
        if options.verbose:
            cmd.append('-v')
        if options.host:
            cmd.append('--host=' + options.host)
        if options.target:
            cmd.append('--target=' + options.target)
        if not options.enable_cleanup:
            cmd.append('--disable-cleanup')
        return Run(cmd, bg=True)

    collect_result = generate_collect_result(
        result_dir, results_file, options.view_diffs)

    MainLoop(test_list, test_build_cmd, collect_result, options.mainloop_jobs)

    # Write report
    with open(result_dir + '/discs', 'w') as discs_f:
        discs_f.write(" ".join(discs))
    ReportDiff(result_dir, options.old_result_dir).txt_image(
        result_dir + '/report')
Example #7
    def write_log(self, filename):
        """Serialize the options_for_tool database to a text file.

        :param filename: a log path (usually testsuite_support.log)
        :type filename: str
        """
        rm(filename)
        with open(filename, 'w') as cmdlog:
            for tool in self.options_for_tool:
                line = tool + ' ' + ' '.join(self.options_for_tool[tool])
                cmdlog.write(line + '\n')
Example #8
    def setup_result_dir(self):
        """Create the output directory in which the results are stored."""
        if os.path.isdir(self.old_output_dir):
            rm(self.old_output_dir, True)
        if os.path.isdir(self.output_dir):
            mv(self.output_dir, self.old_output_dir)
        mkdir(self.output_dir)

        if self.main.options.dump_environ:
            with open(os.path.join(self.output_dir, 'environ.sh'), 'w') as f:
                for var_name in sorted(os.environ):
                    f.write('export %s=%s\n' %
                            (var_name, quote_arg(os.environ[var_name])))
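The rotation here (drop the oldest results, move the current ones aside, create a fresh directory) is a common pattern. A standalone sketch of the same three steps using only the standard library; the function name is hypothetical:

import os
import shutil

def rotate_output_dir(output_dir, old_output_dir):
    if os.path.isdir(old_output_dir):
        shutil.rmtree(old_output_dir)            # drop the oldest results
    if os.path.isdir(output_dir):
        shutil.move(output_dir, old_output_dir)  # keep one generation back
    os.makedirs(output_dir)                      # start fresh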
Example #9
    def RUN_CONFIG_SEQUENCE(toplev_options):
        """Arrange to generate the SUITE_CONFIG configuration file"""

        # In principle, this would be something like
        #
        #  gprconfig --config=C --config=Asm --config=Ada --target=powerpc-elf
        #
        # to latch the compiler selections for all the languages, plus extra
        # bits for the RTS selection.
        #
        # RTS selection by relative path (e.g.
        #   --RTS=powerpc-elf/ravenscar-full-prep) isn't supported by
        # gprconfig, however. It is supported by gprbuild, though, so we
        # resort to that here.

        # We build a temporary dummy project file in the current directory,
        # specifying languages only.

        tempgpr = open("suite.gpr", "w")

        tempgpr.write(
            '\n'.join(('project %(prjname)s is',
                       '  for Languages use ("Asm", "C", "Ada", "C++");',
                       'end %(prjname)s;')) %
            {'prjname': os.path.basename(tempgpr.name).split('.')[0]})
        tempgpr.close()

        # We now run gprbuild -Psuite.gpr --target=bla --RTS=blo, which
        # will complain about missing sources, but only after producing
        # an automatic config file with everything we need, and nothing
        # else (no other file).

        rm(BUILDER.SUITE_CGPR)

        extraopts = []
        if toplev_options.RTS:
            extraopts.append('--RTS=%s' % toplev_options.RTS)

        # Request a specific target only if one is explicitly called
        # for. On native configurations, note that we may be running 32bit
        # tools on a 64bit host.

        if toplev_options.target:
            extraopts.append('--target=%s' % env.target.triplet)

        Run([
            GPRBUILD, '-P', tempgpr.name,
            '--autoconf=%s' % BUILDER.SUITE_CGPR
        ] + extraopts)

        rm(tempgpr.name)
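For reference, since tempgpr.name is "suite.gpr", the %(prjname)s substitutions above render the temporary project file as:

project suite is
  for Languages use ("Asm", "C", "Ada", "C++");
end suite;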
Example #10
    def prepare_working_space(self):
        """Prepare working space.

        Set the working space in self.work_dir. This resets the working
        directory and copies the test into <work_dir>/src. This
        directory can be used to hold temp files, as it will be
        automatically deleted at the end of the test by the clean method.
        """
        # At this stage the test is about to be executed, so start by
        # copying the test sources into a temporary location.
        rm(self.work_dir, True)
        mkdir(self.work_dir)
        try:
            shutil.copytree(self.test, self.work_dir + '/src')
        except shutil.Error:
            print >> sys.stderr, "Error when copying %s in %s" % (
                self.test, self.work_dir + '/src')
Example #11
def main():
    """Run the testsuite.
    """
    m = Main()
    add_mainloop_options(m, extended_options=True)
    add_run_test_options(m)
    m.add_option("--diffs",
                 dest="view_diffs",
                 action="store_true",
                 default=False,
                 help="show diffs on stdout")
    m.parse_args()

    # Create a tmp directory for the entire testsuite, to make sure
    # that, should the git hooks leak any file/directories, we can
    # (1) detect them, and (2) delete them.
    #
    # This requires some extra work to make sure that the scripts
    # being tested do actually use them, but this needs to be done
    # by each testcase, because we want each testcase to have its
    # own tmp directory (allowing for concurrency).  We pass that
    # information to the testcase through the GIT_HOOKS_TESTSUITE_TMP
    # environment variable.
    m.options.tmp = mkdtemp('', 'git-hooks-TS-', m.options.tmp)
    os.environ['GIT_HOOKS_TESTSUITE_TMP'] = m.options.tmp

    try:
        testcases = get_testcases(m.args)
        setup_result_dir(m.options)

        # We do not need discriminants in this testsuite at the moment.
        discs = None

        metrics = {}
        collect_result = generate_collect_result(metrics=metrics,
                                                 options=m.options)
        run_testcase = generate_run_testcase('bin/run-testcase', discs,
                                             m.options)

        MainLoop(testcases, run_testcase, collect_result,
                 m.options.mainloop_jobs)
        print_testsuite_results_summary(metrics)
    finally:
        rm(m.options.tmp, recursive=True)
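The try/finally around the loop guarantees the temporary directory is removed even if a testcase raises. The call mkdtemp('', 'git-hooks-TS-', m.options.tmp) appears to mirror tempfile.mkdtemp(suffix, prefix, dir); under that assumption, a stdlib sketch of the same lifecycle:

import shutil
import tempfile

# run_all_testcases is a hypothetical stand-in for the MainLoop above.
tmp = tempfile.mkdtemp(prefix='git-hooks-TS-')
try:
    run_all_testcases(tmp)
finally:
    shutil.rmtree(tmp)  # always remove the leak-catching directory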
Example #12
def setup_result_dir(options):
    """Save old results and create new result dir.

    :param options: test driver and Main options. This dictionary will be
        modified in place to set: `results_file`, the path to the results file,
        `report_file`, the path to the report file. Note that
        `output_dir` and `old_output_dir` might be modified if
        keep_old_output_dir is True

    Required options are `output_dir`, `keep_old_output_dir`,
    `old_output_dir`, `skip_if_ok` and `skip_if_already_run`.
    Where:

    - output_dir: directory containing test result
    - keep_old_output_dir: if True, move last results in
      old_output_dir
    - old_output_dir: directory where the last results are kept.
      Note that if old_output_dir is None and keep_old_output_dir
      is True, the last test results will be moved to
      output_dir/old and the new ones to output_dir/new
    - skip_if_ok, skip_if_already_run: if one of these options is set to
      True, then just remove the results file.
    """
    output_dir = options.output_dir

    if options.keep_old_output_dir and options.old_output_dir is None:
        options.old_output_dir = os.path.join(output_dir, 'old')
        options.output_dir = os.path.join(output_dir, 'new')

    options.results_file = os.path.join(options.output_dir, 'results')
    options.report_file = os.path.join(options.output_dir, 'report')

    if options.skip_if_ok or options.skip_if_already_run:
        # Remove only the results file
        rm(options.results_file)
    else:
        if not options.keep_old_output_dir:
            # We don't want to keep old results. Just clean the new output_dir
            if os.path.exists(options.output_dir):
                rm(options.output_dir, True)
        else:
            # Move output_dir to old_output_dir
            if os.path.exists(options.old_output_dir):
                rm(options.old_output_dir, True)
            if os.path.exists(options.output_dir):
                mv(options.output_dir, options.old_output_dir)
            else:
                mkdir(options.old_output_dir)

    mkdir(options.output_dir)

    # For the testsuites that used gnatpython.testdriver.add_run_test_options,
    # the user has the option of requesting that the environment be dumped
    # in the form of a shell script inside the output_dir.  If requested,
    # do it now.
    if hasattr(options, 'dump_environ') and options.dump_environ:
        with open(os.path.join(options.output_dir, 'environ.sh'), 'w') as f:
            for var_name in sorted(os.environ):
                f.write('export %s=%s\n' %
                        (var_name, quote_arg(os.environ[var_name])))
Example #13
    def __init__(self, url, dest, rev=None, use_externals=False, force=False):
        """SVNBase constructor.

        :type url: str
        :type dest: str
        :type rev: str | None
        :type use_externals: bool
        :type force: bool
        """
        self.url = url.rstrip('/')
        self.dest = dest
        self.use_externals = use_externals
        self.force = force
        self.rev = rev

        is_valid = self.is_valid()
        if not is_valid:
            if is_valid is not None:
                if not force:
                    # Directory is an svn working directory
                    self.error("destination is already a svn working dir on"
                               " different url")

                try:
                    # First try a subversion switch command
                    vcslogger.debug('switch %s to %s' % (self.dest, self.url))
                    self.switch()
                    # If successful then return.
                    return
                except SVN_Error:
                    pass

            if force:
                vcslogger.debug('cleanup dest directory: %s' % self.dest)
                rm(self.dest, recursive=True)

            vcslogger.debug('checkout %s@%s in %s' %
                            (self.url, self.rev, self.dest))
            self.checkout()
            self.update()
Example #14
    def collect_result(name, process, _job_info):
        """Default collect result function.

        Read the .result and .note files in the {result_dir}/{test_name}
        directory, then append the result to {results_file}.

        If output_diff is True, print the content of .diff files.

        `name` should be the path to the test directory.
        """
        # Unused parameter
        del _job_info
        if metrics is not None:
            # Increment number of run tests
            metrics['run'] += 1

        if use_basename:
            test_name = os.path.basename(name)
        else:
            test_name = os.path.relpath(name, os.getcwd())

        test_result = split_file(result_dir + '/' + test_name + '.result',
                                 ignore_errors=True)
        if not test_result:
            if process == SKIP_EXECUTION:
                test_result = 'CRASH:test skipped'
            else:
                test_result = 'CRASH:cannot read result file'
        else:
            test_result = test_result[0]
            if not test_result:
                test_result = 'CRASH: invalid result file'

        test_note = split_file(result_dir + '/' + test_name + '.note',
                               ignore_errors=True)

        if not test_note:
            test_note = ""
        else:
            test_note = test_note[0]

        # Append result to results file
        echo_to_file(results_file,
                     "%s:%s %s\n" % (test_name, test_result, test_note),
                     append=True)

        testsuite_logging.append_to_logfile(test_name, result_dir)

        test_status = test_result.split(':')[0]
        if test_status not in (DIFF_STATUS + CRASH_STATUS):
            # The command line log is not useful in these cases so it is
            # removed.
            cmdlog = result_dir + '/' + test_name + '.log'
            if os.path.isfile(cmdlog):
                rm(cmdlog)

        if metrics is not None:
            diffs_format = options.diffs_format if hasattr(
                options, 'diffs_format') else None

            # Set last test name
            metrics['last'] = test_name

            # Update metrics and diffs or xfail_diffs file
            diffs_file = os.path.join(result_dir, 'diffs')
            xfail_diffs_file = os.path.join(result_dir, 'xfail_diffs')

            if test_status in DIFF_STATUS:
                metrics['failed'] += 1
                if test_name not in metrics['old_diffs']:
                    metrics['new_failed'] += 1
                get_test_diff(result_dir, test_name, test_note, test_result,
                              diffs_file, diffs_format)
            elif test_status in CRASH_STATUS:
                metrics['crashed'] += 1
                if test_name not in metrics['old_crashes']:
                    metrics['new_crashed'] += 1
                get_test_diff(result_dir, test_name, test_note, test_result,
                              diffs_file, diffs_format)
            elif test_status in XFAIL_STATUS:
                get_test_diff(result_dir, test_name, test_note, test_result,
                              xfail_diffs_file, diffs_format)

            if max_consecutive_failures and process != SKIP_EXECUTION:
                # Count number of consecutive failures
                if test_status in FAIL_STATUS:
                    # ignore XFAIL
                    if test_status not in XFAIL_STATUS:
                        metrics['max_consecutive_failures'] += 1
                elif test_status in SKIP_STATUS:
                    # ignore DEAD or SKIP tests
                    pass
                else:
                    metrics['max_consecutive_failures'] = 0

            # Update global status
            s = []
            if "JOB_ID" in os.environ:
                s.append("%s running tests since %s\n" %
                         (os.environ['JOB_ID'], start_time_str))

            s.append("%(run)s out of %(total)s processed (now at %(last)s)" %
                     metrics)
            s.append("%(new_failed)s new potential regression(s)"
                     " among %(failed)s" % metrics)
            s.append("%(new_crashed)s new crash(es) among %(crashed)s" %
                     metrics)
            echo_to_file(os.path.join(result_dir, 'status'),
                         '\n'.join(s) + '\n')

        if process != SKIP_EXECUTION:
            # else the test has been skipped. No need to print its status.
            if test_status in (DIFF_STATUS + CRASH_STATUS):
                logging_func = logging.error
            else:
                logging_func = logging.info

            logging_func("%-30s %s %s" % (test_name, test_result, test_note))

            if output_diff:
                diff_filename = result_dir + '/' + test_name + '.diff'
                if os.path.exists(diff_filename):
                    with open(diff_filename) as diff_file:
                        logging_func(diff_file.read().strip())

        # Exit the mainloop if too many errors (more than
        # max_consecutive_failures)
        if metrics and max_consecutive_failures \
                and process != SKIP_EXECUTION and metrics[
                    'max_consecutive_failures'] >= max_consecutive_failures:
            raise TooManyErrors
Example #15
    def __init__(self,
                 test,
                 discs,
                 result_dir,
                 temp_dir=Env().tmp_dir,
                 enable_cleanup=True,
                 restricted_discs=None,
                 test_args=None,
                 failed_only=False,
                 default_timeout=780,
                 use_basename=True):
        """TestRunner constructor.

        :param test: location of the test
        :type test: str
        :param discs: list of discriminants
        :type discs: list[str]
        :param result_dir: directory in which results will be stored
        :type result_dir: str
        :param temp_dir: temporary directory used during test run
        :type temp_dir: str
        :param enable_cleanup: whether the temporary files need to be removed
        :type enable_cleanup: bool
        :param restricted_discs: None or a list of discriminants
        :type restricted_discs:  list[str] | None
        :param test_args: ???
        :param failed_only: run failed only
        :type failed_only: bool
        :param default_timeout: timeout when executing a test
        :type default_timeout: int
        :param use_basename: if True use the test basename to get the test name
            else use the relative path
        :type use_basename: bool
        """
        self.test = test.rstrip('/')
        self.discs = discs
        self.cmd_line = None
        self.test_args = test_args
        self.enable_cleanup = enable_cleanup
        self.restricted_discs = restricted_discs
        self.skip = False  # if True, do not run execute()

        # Test name
        if use_basename:
            self.test_name = os.path.basename(self.test)
        else:
            self.test_name = os.path.relpath(self.test, os.getcwd())

        # Prefix of files holding the test result
        self.result_prefix = result_dir + '/' + self.test_name

        mkdir(os.path.dirname(self.result_prefix))

        # Temp directory in which the test will be run
        self.work_dir = os.path.realpath(
            os.path.join(temp_dir,
                         'tmp-test-%s-%d' % (self.test_name, os.getpid())))
        self.output = self.work_dir + '/tmpout'
        self.output_filtered = self.work_dir + '/tmpout.filtered'
        self.diff_output = self.work_dir + '/diff'
        self.cmdlog = self.work_dir + '/' + self.test_name + '.log'

        # Initial test status
        self.result = {'result': 'UNKNOWN', 'msg': '', 'is_failure': True}

        # Some tests save the pids of spawned background processes in
        # work_dir/.pids. The TEST_WORK_DIR environment variable is used to
        # pass the working directory location.
        os.environ['TEST_WORK_DIR'] = self.work_dir

        if failed_only:
            # Read old result now
            previous_result = self.read_result()
            if previous_result in IS_STATUS_FAILURE \
                    and not IS_STATUS_FAILURE[previous_result]:
                # We don't need to run this test. Return now
                self.skip = True
                return

        # Make sure we start from a sane environment
        rm(self.result_prefix + '.result')
        rm(self.result_prefix + '.out')
        rm(self.result_prefix + '.expected')
        rm(self.result_prefix + '.diff')
        rm(self.result_prefix + '.log')
        rm(self.result_prefix + '.out.filtered')

        # Initialize options defaults (can be modified with test.opt).
        # By default a test is not DEAD, SKIP nor XFAIL. Its maximum execution
        # time is 780s. Test script is test.cmd and output is compared against
        # test.out.
        self.opt_results = {
            'RLIMIT': str(default_timeout),
            'DEAD': None,
            'XFAIL': False,
            'SKIP': None,
            'OUT': 'test.out',
            'CMD': 'test.cmd',
            'FILESIZE_LIMIT': None,
            'TIMING': None,
            'NOTE': None
        }
        self.opt_file = 'test.opt'

        # test.cmd has priority; if not found, use test.py
        if not os.path.isfile(self.test + '/test.cmd') \
                and os.path.isfile(self.test + '/test.py'):
            self.opt_results['CMD'] = 'test.py'
Example #16
def main():
    """Run the testsuite and generate reports"""
    # Parse the command lines options
    m = Main(add_targets_options=True)
    add_mainloop_options(m)
    add_run_test_options(m)
    m.add_option('--diffs',
                 dest='diffs',
                 action='store_true',
                 default=False,
                 help='show diffs on stdout')
    m.add_option("--old-result-dir",
                 type="string",
                 default=None,
                 help="Old result dir (to generate the report)")
    m.add_option('-b',
                 '--build-dir',
                 dest='build_dir',
                 help='separate PolyORB build directory')
    m.add_option('--testsuite-src-dir',
                 dest='testsuite_src_dir',
                 help='path to polyorb testsuite sources')
    m.add_option('--coverage',
                 dest='coverage',
                 action='store_true',
                 default=False,
                 help='generate coverage information')
    m.parse_args()

    # Various files needed or created by the testsuite
    results_file = m.options.output_dir + '/results'
    report_file = m.options.output_dir + '/report'

    if not m.options.failed_only:
        rm(m.options.output_dir, True)
        mkdir(m.options.output_dir)

    # Add current directory in PYTHONPATH (to find test_utils.py)
    env = Env()
    env.add_search_path('PYTHONPATH', os.path.join(os.getcwd(), 'tests'))
    fixed_support_dir = os.path.join(os.getcwd(), 'fixed_support_dir')
    env.add_search_path('FIXED_SUPPORT_DIR', fixed_support_dir)
    env.add_path(os.path.join(fixed_support_dir))
    env.add_path('.')  # many tests expect '.' in the PATH

    # Avoid extra debug traces
    os.environ['POLYORB_LOG_DEFAULT'] = 'error'

    # Generate the discs list for test.opt parsing
    # Always add 'ALL'
    common_discs = Env().discriminants

    # Be backward compatible with the old IDL tests
    # Set the polyorb discriminant and export the IDLCOMP
    # environment variable.
    common_discs.append('PolyORB')
    common_discs.append('PolyORB_IAC')
    os.environ['IDLCOMP'] = 'iac'

    # Retrieve also the polyorb specific discriminants
    p = Run([
        which('bash'),
        which('polyorb-config').replace('\\', '/'), '--config'
    ])

    # First find the supported application personalities.
    match = re.search('Application *personalities *: (.+)', p.out)
    if match is not None:
        common_discs += ['app_%s' % k for k in match.group(1).split()]

    # Then the supported protocols
    match = re.search('Protocol *personalities *: (.+)', p.out)
    if match is not None:
        common_discs += ['proto_%s' % k for k in match.group(1).split()]

    # Then the supported services
    match = re.search('Services *: (.+)', p.out)
    if match is not None:
        common_discs += ['serv_%s' % k for k in match.group(1).split()]

    # Do we have SSL support?
    if re.search('SSL *support *: *yes', p.out):
        common_discs.append('ssl_support')

    with open(m.options.output_dir + '/discs', 'w') as f_disk:
        f_disk.write(", ".join(common_discs))

    # Expand ~ and ~user constructions in user-supplied paths
    if m.options.build_dir is None:
        m.options.build_dir = os.path.join(os.getcwd(), os.pardir)
    else:
        m.options.build_dir = os.path.expanduser(m.options.build_dir)

    if m.options.testsuite_src_dir is None:
        m.options.testsuite_src_dir = os.path.join(os.getcwd())
    else:
        m.options.testsuite_src_dir = os.path.expanduser(
            m.options.testsuite_src_dir)

    # Compute the test list
    if m.args:
        test_glob = m.args[0]
    else:
        test_glob = None
    test_list = filter_list('./tests/*/*/*/test.py', test_glob)
    if os.path.isdir('regtests'):
        test_list.extend(filter_list('./regtests/*/test.*', test_glob))

    collect_result = generate_collect_result(m.options.output_dir,
                                             results_file, m.options.diffs)
    run_testcase = generate_run_testcase('tests/run-test.py', common_discs,
                                         m.options)

    os.environ['TEST_CONFIG'] = os.path.join(os.getcwd(), 'env.dump')
    env.options = m.options
    env.log_dir = os.path.join(os.getcwd(), 'log')
    env.store(os.environ['TEST_CONFIG'])

    if len(test_list) == 0:
        logger.error("No matching test found")
        return

    MainLoop(test_list, run_testcase, collect_result, m.options.mainloop_jobs)

    # Generate the report file
    ReportDiff(m.options.output_dir,
               m.options.old_result_dir).txt_image(report_file)
Example #17
from gnatpython.fileutils import rm

from testsuite_support.utils import run, print_nonprintable


rm('last-line-missing-crlf.ads.pp')
run('gnatpp --quiet --output=last-line-missing-crlf.ads.pp'
    ' last-line-missing-crlf.ads')
print_nonprintable('last-line-missing-crlf.ads.pp')
Example #18
    def tear_down(self):
        keep_project = self.global_env['options'].keep_project
        create_only = self.global_env['options'].create_projects
        if self.project_is_tmp and not keep_project and not create_only:
            rm(self.project)
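Across these examples, rm is used with three implicit properties: missing paths are tolerated (Examples #1 and #7 call it unconditionally), glob patterns are accepted (Example #2), and a second positional or recursive keyword argument enables directory removal (Example #4 and several later examples). A minimal sketch of a helper with those semantics, for illustration only; the real gnatpython.fileutils.rm may differ in details:

import glob
import os
import shutil

def rm(path, recursive=False):
    # Hypothetical reimplementation inferred from the usage above.
    # glob.glob returns an empty list for a missing path, so calling
    # rm on a nonexistent file is a no-op.
    for name in glob.glob(path):
        if recursive and os.path.isdir(name) and not os.path.islink(name):
            shutil.rmtree(name)
        else:
            os.remove(name)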