Example #1
from subprocess import Popen, PIPE
from time import sleep

import imp
import os
import re
import sys

# Needed for Env() below (the same gnatpython helper the other drivers import).
from gnatpython.env import Env

POLYORB_CONF = "POLYORB_CONF"

RLIMIT = int(os.environ['RLIMIT'])
TEST_NAME = os.environ['TEST_NAME']

# Restore testsuite environment
Env().restore(os.environ['TEST_CONFIG'])

# If POLYORB_TEST_VERBOSE is set to true, then output more data
VERBOSE = Env().options.verbose  # set by testsuite.py

# Main testsuite source dir
SRC_DIR = Env().options.testsuite_src_dir

# All executable test paths are relative to the PolyORB testsuite build dir
BASE_DIR = os.path.join(Env().options.build_dir, 'testsuite')

# Import config module, which is generated by configure in the testsuite
# build directory (so not on the Python search path).
config = imp.load_source('config', os.path.join(BASE_DIR, 'tests',
                                                'config.py'))
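
# Note: the 'imp' module used above is deprecated in Python 3; a minimal
# equivalent sketch with importlib (not part of the original driver) would be:
#
#   import importlib.util
#   spec = importlib.util.spec_from_file_location(
#       'config', os.path.join(BASE_DIR, 'tests', 'config.py'))
#   config = importlib.util.module_from_spec(spec)
#   spec.loader.exec_module(config)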
Example #2
def main():
    """Run the testsuite"""

    m = Main()
    add_mainloop_options(m, extended_options=True)
    add_run_test_options(m)
    m.add_option("--diffs",
                 dest="view_diffs",
                 action="store_true",
                 default=False,
                 help="show diffs on stdout")
    m.parse_args()

    # Various files needed or created by the testsuite.
    # setup_result_dir creates:
    #   the output directory ('out' by default)
    #   the report file
    #   the results file

    setup_result_dir(m.options)

    if m.args:
        test_list = [t.strip('/') for t in m.args]
    else:
        test_list = sorted(glob('tests/*'))

    env = Env()

    # add support module path
    python_lib = os.path.join(os.getcwd(), 'lib', 'python')
    env.add_search_path("PYTHONPATH", python_lib)

    env.add_search_path('PYTHONPATH', os.getcwd())
    discs = [env.target.platform]

    if m.options.discs:
        discs += m.options.discs.split(',')

    # 'run', 'failed' and 'crashed' are filled in by the generated
    # collect_result function below; 'uok' is accumulated by the wrapper.
    test_metrics = {'total': len(test_list), 'uok': 0}

    # Generate a standard 'collect_result' function...
    generated_collect_result = generate_collect_result(
        result_dir=m.options.output_dir,
        results_file=m.options.results_file,
        output_diff=m.options.view_diffs,
        metrics=test_metrics)

    # ... and then wrap that generated 'collect_result' function in something
    # that will also accumulate 'UOK' test results
    def collect_result_including_uok(name, process, _job_info):
        generated_collect_result(name, process, _job_info)
        test_name = os.path.basename(name)
        test_result = split_file(m.options.output_dir + '/' + test_name +
                                 '.result',
                                 ignore_errors=True)
        if test_result:
            test_status = test_result[0].split(':')[0]
            if test_status == 'UOK':
                test_metrics['uok'] += 1

    run_testcase = generate_run_testcase('run-test', discs, m.options)

    MainLoop(test_list, run_testcase, collect_result_including_uok,
             m.options.mainloop_jobs)

    print "Summary: Ran %(run)s/%(total)s tests, with %(failed)s failed, %(crashed)s crashed, %(uok)s unexpectedly passed." % test_metrics

    # Generate the report file
    ReportDiff(m.options.output_dir,
               m.options.old_output_dir).txt_image(m.options.report_file)

    if (test_metrics['failed'] > 0 or test_metrics['crashed'] > 0
            or test_metrics['uok'] > 0):
        sys.exit(1)
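
# Assumed entry point for this driver (not shown in the original snippet):
if __name__ == '__main__':
    main()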
Example #3
"""
This module contains support functions for all test.py scripts.
"""

import logging
import os
import sys

from glob import glob
from gnatpython.env import Env
from gnatpython.fileutils import cd, mv
from gnatpython.ex import Run

Env().restore(os.environ['TEST_CONFIG'])

# Move to test directory
ROOT_DIR = os.getcwd()
TEST_DIR = os.path.dirname(sys.modules['__main__'].__file__)
TEST_NAME = os.path.basename(TEST_DIR)


def setup():
    """Rewrite the test's .gpr files so '../common' points at ROOT_DIR/common."""
    cd(TEST_DIR)
    for prj in glob('*.gpr'):
        with open(prj) as prj_orig:
            lines = [line for line in prj_orig]
            with open(prj + '.new', 'w') as prj_new:
                for line in lines:
                    line = line.replace('../common',
                                        os.path.join(ROOT_DIR, 'common'))
                    prj_new.write(line)
Example #4
def run_testsuite(test_driver):
    """Run the testsuite

    PARAMETERS
      test_driver: path to the test driver (e.g. lib/python/run-test)
    """
    options = __parse_options()
    env = Env()

    if options.vc_timeout:
        os.environ["vc_timeout"] = str(options.vc_timeout)
    if options.debug:
        os.environ["debug"] = "true"
    if options.verbose:
        os.environ["verbose"] = "true"
    if options.inverse_prover:
        os.environ["inverse_prover"] = "true"
    if options.benchmarks:
        os.environ["benchmarks"] = "true"
    if options.cache:
        os.environ["cache"] = "true"
    if options.coverage:
        os.environ["coverage"] = "true"
    if options.z3_counterexample:
        os.environ["z3_counterexample"] = "true"

    if options.test_list:
        with open(options.test_list, 'r') as f:
            test_list = f.readlines()
            test_list = [os.path.join("tests", s.strip()) for s in test_list]
            test_list = [t for t in test_list if os.path.isdir(t)]
    elif options.exact_name:
        test_name = os.path.join('tests/', options.run_test)
        if os.path.isdir(test_name):
            test_list = [test_name]
        else:
            print 'error: test \'' + options.run_test + '\' not found'
            exit(1)
    elif options.pattern:
        test_list = filter_list('tests/*')
        reg = re.compile(options.pattern)
        test_list = [
            test for test in test_list if test_contains_pattern(test, reg)
        ]
    else:
        test_list = [
            t for t in filter_list('tests/*', options.run_test)
            if os.path.isdir(t)
        ]

    # Various files needed or created by the testsuite
    setup_result_dir(options)

    discs = env.discriminants

    if options.discs:
        discs += options.discs

    run_testcase = generate_run_testcase(test_driver, discs, options)
    collect_result = generate_collect_result(options.output_dir,
                                             options.results_file,
                                             options.view_diffs)

    MainLoop(test_list, run_testcase, collect_result, options.mainloop_jobs)

    # Write report
    with open(options.output_dir + '/discs', 'w') as discs_f:
        discs_f.write(" ".join(discs))
    ReportDiff(options.output_dir,
               options.old_output_dir).txt_image(options.report_file)
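
# Hypothetical wrapper invocation, using the driver path from the docstring:
if __name__ == '__main__':
    run_testsuite('lib/python/run-test')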
Example #5
import os
import sys

from gnatpython.env import Env

from SUITE import control
from SUITE.control import GPRCLEAN, BUILDER, LANGINFO

from SUITE.cutils import indent_after_first_line, lines_of, ndirs_in

# This module is loaded as part of a Run operation for a test.py
# file found and launched by the toplevel driver

# This is where the toplevel invocation was issued:
ROOT_DIR = os.getcwd()

# And this is the relative directory where test.py was found:
TEST_DIR = os.path.dirname(sys.modules['__main__'].__file__)

env = Env()

# ==================
# == ReportOutput ==
# ==================

# Internal helper to dispatch information to test.py.err/log/out


class _ReportOutput(object):
    """A class that allows us to write some text to a report file, while
    bufferizing part of it until we know whether this part should also
    be printed on standard output or not.  The idea is to buffer the
    output generated for each driver until the end of the test, and then
    print that output to stdout if we then determine that the test failed.
Example #6
from gnatpython.env import Env
from test_support import Run, spark_install_path
import os.path

installdir = spark_install_path()
bindir = os.path.join(installdir, 'libexec', 'spark', 'bin')
Env().add_path(bindir)

process = Run(["cvc4", "--show-config"])
lines = process.out.splitlines()
# The first three lines of cvc4 output contain the date and exact compiler
# version, so skip them. Also drop the "scm" line, which refers to the exact
# git commit in some builds.
for line in lines[3:]:
    if not line.startswith("scm"):
        print line
Example #7
def main():
    """Run the testsuite and generate reports"""
    # Parse the command lines options
    m = Main(add_targets_options=True)
    add_mainloop_options(m)
    add_run_test_options(m)
    m.add_option('--diffs',
                 dest='diffs',
                 action='store_true',
                 default=False,
                 help='show diffs on stdout')
    m.add_option("--old-result-dir",
                 type="string",
                 default=None,
                 help="Old result dir (to generate the report)")
    m.add_option('-b',
                 '--build-dir',
                 dest='build_dir',
                 help='separate PolyORB build directory')
    m.add_option('--testsuite-src-dir',
                 dest='testsuite_src_dir',
                 help='path to polyorb testsuite sources')
    m.add_option('--coverage',
                 dest='coverage',
                 action='store_true',
                 default=False,
                 help='generate coverage information')
    m.parse_args()

    # Various files needed or created by the testsuite
    results_file = m.options.output_dir + '/results'
    report_file = m.options.output_dir + '/report'

    if not m.options.failed_only:
        rm(m.options.output_dir, True)
        mkdir(m.options.output_dir)

    # Add current directory in PYTHONPATH (to find test_utils.py)
    env = Env()
    env.add_search_path('PYTHONPATH', os.path.join(os.getcwd(), 'tests'))
    fixed_support_dir = os.path.join(os.getcwd(), 'fixed_support_dir')
    env.add_search_path('FIXED_SUPPORT_DIR', fixed_support_dir)
    env.add_path(os.path.join(fixed_support_dir))
    env.add_path('.')  # many tests expect '.' in the PATH

    # Avoid extra debug traces
    os.environ['POLYORB_LOG_DEFAULT'] = 'error'

    # Generate the discs list for test.opt parsing
    # Always add 'ALL'
    common_discs = Env().discriminants

    # Be backward compatible with the old IDL tests
    # Set the polyorb discriminant and export the IDLCOMP
    # environment variable.
    common_discs.append('PolyORB')
    common_discs.append('PolyORB_IAC')
    os.environ['IDLCOMP'] = 'iac'

    # Retrieve also the polyorb specific discriminants
    p = Run([
        which('bash'),
        which('polyorb-config').replace('\\', '/'), '--config'
    ])

    # First, find the supported application personalities.
    match = re.search('Application *personalities *: (.+)', p.out)
    if match is not None:
        common_discs += ['app_%s' % k for k in match.group(1).split()]

    # Then the supported protocols
    match = re.search('Protocol *personalities *: (.+)', p.out)
    if match is not None:
        common_discs += ['proto_%s' % k for k in match.group(1).split()]

    # Then the supported services
    match = re.search('Services *: (.+)', p.out)
    if match is not None:
        common_discs += ['serv_%s' % k for k in match.group(1).split()]

    # Do we have SSL support?
    if re.search('SSL *support *: *yes', p.out):
        common_discs.append('ssl_support')

    with open(m.options.output_dir + '/discs', 'w') as f_disk:
        f_disk.write(", ".join(common_discs))

    # Expand ~ and ~user constructions in user-provided paths
    if m.options.build_dir is None:
        m.options.build_dir = os.path.join(os.getcwd(), os.pardir)
    else:
        m.options.build_dir = os.path.expanduser(m.options.build_dir)

    if m.options.testsuite_src_dir is None:
        m.options.testsuite_src_dir = os.path.join(os.getcwd())
    else:
        m.options.testsuite_src_dir = os.path.expanduser(
            m.options.testsuite_src_dir)

    # Compute the test list
    if m.args:
        test_glob = m.args[0]
    else:
        test_glob = None
    test_list = filter_list('./tests/*/*/*/test.py', test_glob)
    if os.path.isdir('regtests'):
        test_list.extend(filter_list('./regtests/*/test.*', test_glob))

    collect_result = generate_collect_result(m.options.output_dir,
                                             results_file, m.options.diffs)
    run_testcase = generate_run_testcase('tests/run-test.py', common_discs,
                                         m.options)

    os.environ['TEST_CONFIG'] = os.path.join(os.getcwd(), 'env.dump')
    env.options = m.options
    env.log_dir = os.path.join(os.getcwd(), 'log')
    env.store(os.environ['TEST_CONFIG'])

    if len(test_list) == 0:
        logger.error("No matching test found")
        return

    MainLoop(test_list, run_testcase, collect_result, m.options.mainloop_jobs)

    # Generate the report file
    ReportDiff(m.options.output_dir,
               m.options.old_result_dir).txt_image(report_file)
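
# For reference, the regular expressions above expect 'polyorb-config
# --config' output roughly of this shape (illustrative values only):
#
#   Application personalities : corba dsa moma
#   Protocol personalities    : giop soap
#   Services                  : naming event time
#   SSL support               : yes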
Example #8
    def __init__(self,
                 test,
                 discs,
                 result_dir,
                 temp_dir=Env().tmp_dir,
                 enable_cleanup=True,
                 restricted_discs=None,
                 test_args=None,
                 failed_only=False,
                 default_timeout=780,
                 use_basename=True):
        """TestRunner constructor.

        :param test: location of the test
        :type test: str
        :param discs: list of discriminants
        :type discs: list[str]
        :param result_dir: directory in which results will be stored
        :type result_dir: str
        :param temp_dir: temporary directory used during test run
        :type temp_dir: str
        :param enable_cleanup: whether the temporary files need to be removed
        :type enable_cleanup: bool
        :param restricted_discs: None or a list of discriminants
        :type restricted_discs:  list[str] | None
        :param test_args: ???
        :param failed_only: rerun only the tests that failed in the previous run
        :type failed_only: bool
        :param default_timeout: timeout when executing a test
        :type default_timeout: int
        :param use_basename: if True, use the test basename as the test name;
            otherwise use the path relative to the current directory
        :type use_basename: bool
        """
        self.test = test.rstrip('/')
        self.discs = discs
        self.cmd_line = None
        self.test_args = test_args
        self.enable_cleanup = enable_cleanup
        self.restricted_discs = restricted_discs
        self.skip = False  # if True, do not run execute()

        # Test name
        if use_basename:
            self.test_name = os.path.basename(self.test)
        else:
            self.test_name = os.path.relpath(self.test, os.getcwd())

        # Prefix of files holding the test result
        self.result_prefix = result_dir + '/' + self.test_name

        mkdir(os.path.dirname(self.result_prefix))

        # Temp directory in which the test will be run
        self.work_dir = os.path.realpath(
            os.path.join(temp_dir,
                         'tmp-test-%s-%d' % (self.test_name, os.getpid())))
        self.output = self.work_dir + '/tmpout'
        self.output_filtered = self.work_dir + '/tmpout.filtered'
        self.diff_output = self.work_dir + '/diff'
        self.cmdlog = self.work_dir + '/' + self.test_name + '.log'

        # Initial test status
        self.result = {'result': 'UNKNOWN', 'msg': '', 'is_failure': True}

        # Some tests save the pids of spawned background processes in
        # work_dir/.pids. The TEST_WORK_DIR environment variable is used to
        # pass the working directory location.
        os.environ['TEST_WORK_DIR'] = self.work_dir

        if failed_only:
            # Read old result now
            previous_result = self.read_result()
            if previous_result in IS_STATUS_FAILURE \
                    and not IS_STATUS_FAILURE[previous_result]:
                # We don't need to run this test. Return now
                self.skip = True
                return

        # Start from a clean slate: remove any leftover result files
        rm(self.result_prefix + '.result')
        rm(self.result_prefix + '.out')
        rm(self.result_prefix + '.expected')
        rm(self.result_prefix + '.diff')
        rm(self.result_prefix + '.log')
        rm(self.result_prefix + '.out.filtered')

        # Initialize options defaults (can be modified with test.opt).
        # By default a test is not DEAD, SKIP nor XFAIL. Its maximum execution
        # time is 780s. Test script is test.cmd and output is compared against
        # test.out.
        self.opt_results = {
            'RLIMIT': str(default_timeout),
            'DEAD': None,
            'XFAIL': False,
            'SKIP': None,
            'OUT': 'test.out',
            'CMD': 'test.cmd',
            'FILESIZE_LIMIT': None,
            'TIMING': None,
            'NOTE': None
        }
        self.opt_file = 'test.opt'

        # test.cmd has priority; if it is not found, fall back to test.py
        if (not os.path.isfile(self.test + '/test.cmd')
                and os.path.isfile(self.test + '/test.py')):
            self.opt_results['CMD'] = 'test.py'
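
        # IS_STATUS_FAILURE, used in the failed_only handling above, maps a
        # test status to whether it counts as a failure. A minimal sketch of
        # such a table (the real one lives in the testsuite support library
        # and may list more statuses):
        #
        #   IS_STATUS_FAILURE = {
        #       'OK': False, 'UOK': False, 'XFAIL': False, 'DEAD': False,
        #       'DIFF': True, 'CRASH': True, 'UNKNOWN': True,
        #   }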
Example #9
def add_run_test_options(m):
    """Add standard test driver options."""
    run_test_opts = m.create_option_group("Test driver options")
    run_test_opts.add_option("-o",
                             "--output-dir",
                             dest="output_dir",
                             metavar="DIR",
                             default="./out",
                             help="select output dir")
    run_test_opts.add_option("--timeout",
                             default='780',
                             metavar="SECONDS",
                             help="Default timeout")
    run_test_opts.add_option("-d",
                             "--discriminants",
                             dest="discs",
                             metavar="DISCS",
                             default="ALL",
                             help="set discriminants")
    run_test_opts.add_option("-t",
                             "--temp-dir",
                             dest="tmp",
                             metavar="DIR",
                             default=Env().tmp_dir)
    run_test_opts.add_option("-e",
                             "--env-file",
                             dest="env_file",
                             metavar="FILE",
                             default="load env file")
    run_test_opts.add_option("--disable-cleanup",
                             dest="enable_cleanup",
                             action="store_false",
                             default=True,
                             help="disable cleanup of working space")
    run_test_opts.add_option(
        "--dump-environ",
        dest="dump_environ",
        action="store_true",
        default=False,
        help="Dump all environment variables in a file named environ.sh,"
        " located in the output directory (see --output-dir). This"
        " file can then be sourced from a Bourne shell to recreate"
        " the environement that existed when this testsuite was run"
        " to produce a given testsuite report.")
    run_test_opts.add_option("-r",
                             "--restricted-mode",
                             dest="restricted_discs",
                             metavar="DISCS",
                             default=None,
                             help="enable restricted mode")
    run_test_opts.add_option(
        '-f',
        '--failed-only',
        action="store_true",
        help="run failed only - skip the test is last result is OK")
    run_test_opts.add_option(
        '--use-basename',
        action='store_true',
        help="Use os.path.basename to get the real name of a test. "
        "Note that this will only work if you don't have two tests with "
        "the same name in your test directories")
    m.add_option_group(run_test_opts)
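
# Typical use, as in the driver examples above: create a Main object,
# register the option groups, then parse the command line:
#
#   m = Main()
#   add_mainloop_options(m)
#   add_run_test_options(m)
#   m.parse_args()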
Example #10
    def compute_cmd_line_cmd(self, filesize_limit):
        """Compute self.cmd_line and preprocess the test script.

        This function is called by compute_cmd_line
        """
        cmd = self.opt_results['CMD']
        if Env().host.os.name != 'windows':
            script = split_file(cmd)

            # The test is run on a Unix system but has a 'cmd' syntax.
            # Convert it to Bourne shell syntax.
            cmdfilter = Filter()
            cmdfilter.append([r'-o(.*).exe', r'-o \1'])
            cmdfilter.append([r'%([^ ]*)%', r'"$\1"'])
            cmdfilter.append([r'(\032|\015)', r''])
            cmdfilter.append(
                [r'set *([^ =]+) *= *([^ ]*)', r'\1="\2"; export \1'])
            script = cmdfilter.process(script)

            cmd = self.work_dir + '/__test.sh'
            echo_to_file(cmd, 'PATH=.:$PATH; export PATH\n')

            # Compute effective file size limit on Unix system.
            if filesize_limit > 0:
                # The file size limit can be specified either by the default
                # or by means of the FILESIZE_LIMIT command in the test's
                # test.opt. When both are specified, use the higher limit
                # (note that 0 means unlimited).
                opt_limit = self.opt_results['FILESIZE_LIMIT']
                if opt_limit is not None:
                    try:
                        opt_limit = int(opt_limit)
                    except (TypeError, ValueError):
                        # Malformed limit in test.opt: fall back to default.
                        opt_limit = filesize_limit
                else:
                    opt_limit = filesize_limit

                if opt_limit != 0:
                    if filesize_limit < opt_limit:
                        filesize_limit = opt_limit

                    # Limit the file size. The argument to 'ulimit -f' is a
                    # number of 512-byte blocks, so the user-provided limit
                    # (expressed in KiB) is multiplied by two. File size
                    # limits are not supported on Windows.
                    echo_to_file(cmd, 'ulimit -f %s\n' % (filesize_limit * 2),
                                 True)
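                    # For example, a user limit of 1024 (KiB) yields
                    # 'ulimit -f 2048', i.e. 2048 * 512 bytes = 1 MiB.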

            # Source support.sh in TEST_SUPPORT_DIR if set
            if 'TEST_SUPPORT_DIR' in os.environ and os.path.isfile(
                    os.environ['TEST_SUPPORT_DIR'] + '/support.sh'):
                echo_to_file(cmd, '. $TEST_SUPPORT_DIR/support.sh\n', True)

            echo_to_file(cmd, script, True)

            self.cmd_line += ['bash', cmd]
        else:
            # On Windows systems, use cmd.exe to run the script.
            if cmd[-4:] != '.cmd':
                # We are about to use cmd.exe to run a test. In this case,
                # ensure that the file extension is .cmd otherwise a dialog box
                # will popup asking to choose the program that should be used
                # to run the script.
                cp(cmd, self.work_dir + '/test__.cmd')
                cmd = self.work_dir + '/test__.cmd'

            self.cmd_line += ['cmd.exe', '/q', '/c', cmd]