Example #1
    def build_as_needed (self, docformat):

        self.passno += 1
        self.this_docformat = docformat

        mkdir (self.itemsdir())

        if self.do_str() and self.passno == 1:
            self.__prepare_str_dir()
        
        if self.o.testsuite_dir and not self.local_testsuite_dir:
            self.__localize_testsuite_dir ()

        # Build the STR as needed, using the REST generated
        # by prepare_str_dir above:

        if self.do_str():
            self.build_str()

        # Build the TOR as needed, which might look into testsuite results to
        # match TC artifacts against presence of test data dumps:

        if self.do_tor():
            self.build_tor()

        # Build the PLANS as needed:

        if self.do_plans():
            self.build_plans()

        # Build a kit package as queried:

        if self.do_kit():
            self.build_kit()
Example #2
def setup_result_dir(options):
    """Save old results and create new result dir.

    :param options: test driver and Main options. This object is modified
        in place to set `results_file` (the path to the results file) and
        `report_file` (the path to the report file). Note that `output_dir`
        and `old_output_dir` may also be modified when `keep_old_output_dir`
        is True.

    Required options are `output_dir`, `keep_old_output_dir`,
    `old_output_dir`, `skip_if_ok` and `skip_if_already_run`.
    Where:

    - output_dir: directory containing the test results
    - keep_old_output_dir: if True, move the last results into
      old_output_dir
    - old_output_dir: directory where the last results are kept.
      Note that if old_output_dir is None and keep_old_output_dir
      is True, the last test results are moved to output_dir/old
      and the new ones written to output_dir/new
    - skip_if_ok, skip_if_already_run: if either of these options is set
      to True, just remove the results file.
    """
    output_dir = options.output_dir

    if options.keep_old_output_dir and options.old_output_dir is None:
        options.old_output_dir = os.path.join(output_dir, 'old')
        options.output_dir = os.path.join(output_dir, 'new')

    options.results_file = os.path.join(options.output_dir, 'results')
    options.report_file = os.path.join(options.output_dir, 'report')

    if options.skip_if_ok or options.skip_if_already_run:
        # Remove only the results file
        rm(options.results_file)
    else:
        if not options.keep_old_output_dir:
            # We don't want to keep old results. Just clean the new output_dir
            if os.path.exists(options.output_dir):
                rm(options.output_dir, True)
        else:
            # Move output_dir to old_output_dir
            if os.path.exists(options.old_output_dir):
                rm(options.old_output_dir, True)
            if os.path.exists(options.output_dir):
                mv(options.output_dir, options.old_output_dir)
            else:
                mkdir(options.old_output_dir)

    mkdir(options.output_dir)

    # For the testsuites that used gnatpython.testdriver.add_run_test_options,
    # the user has the option of requesting that the environment be dumped
    # in the form of a shell script inside the output_dir.  If requested,
    # do it now.
    if hasattr(options, 'dump_environ') and options.dump_environ:
        with open(os.path.join(options.output_dir, 'environ.sh'), 'w') as f:
            for var_name in sorted(os.environ):
                f.write('export %s=%s\n' %
                        (var_name, quote_arg(os.environ[var_name])))
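
A minimal usage sketch for the function above, assuming `setup_result_dir` and the gnatpython file utilities it relies on are importable; the `Namespace` stands in for the parsed driver options and all field values are illustrative:

from argparse import Namespace

options = Namespace(
    output_dir='out',
    keep_old_output_dir=True,
    old_output_dir=None,
    skip_if_ok=False,
    skip_if_already_run=False)

setup_result_dir(options)
# With these values, new results go to out/new (options.output_dir), any
# previous results under out/new are moved to out/old (options.old_output_dir),
# and options.results_file / options.report_file point inside out/new.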
Example #3
    def to_workdir(self, wdir):
        """Switch to work directory WDIR, creating it if necessary. WDIR is
        expected to be either absolute or relative from the homedir."""

        self.to_homedir()
        mkdir(wdir)
        cd(wdir)
Example #4
def xcov_instrument(gprsw,
                    covlevel,
                    isi_file,
                    extra_args=[],
                    dump_method='atexit',
                    gpr_obj_dir=None,
                    out=None,
                    err=None,
                    register_failure=True):
    """
    Run "gnatcov instrument" on a project.

    :param GPRswitches gprsw: Project file command line switches to honor.
    :param str covlevel: Coverage level for the instrumentation
        (--level argument).
    :param str isi_file: Name of the ISI file to create.
    :param list[str] extra_args: Extra arguments to append to the command line.
    :param str dump_method: Method to dump coverage buffers (--dump-method
        argument).
    :param None|str gpr_obj_dir: Optional name of the directory where gprbuild
        will create build artifacts. If left to None, assume they are produced
        in the current directory.

    See SUITE.tutils.xcov for the other supported options.
    """
    # Create the object directory so that gnatcov does not warn that it
    # does not exist. This is specific to the source trace mode because
    # we run gnatcov before gprbuild.
    if gpr_obj_dir:
        mkdir(gpr_obj_dir)

    args = (['instrument', '--level', covlevel, '--dump-method', dump_method] +
            gprsw.as_strings + extra_args + [isi_file])
    xcov(args, out=out, err=err, register_failure=register_failure)
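
A usage sketch, assuming `gprsw` is a GPRswitches instance built elsewhere by the testsuite; the coverage level and ISI file name below are illustrative:

xcov_instrument(gprsw, covlevel='stmt+decision', isi_file='prog.isi',
                gpr_obj_dir='obj')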
Example #5
 def to_subdir(self, dir):
     """
     Change the current directory to `dir` (a path relative to `self`'s home
     directory). Create it if needed.
     """
     self.to_homedir()
     mkdir(dir)
     cd(dir)
Example #6
def main():
    """Run the testsuite"""
    options = __parse_options()
    assert os.path.exists(makedir("bin")), \
        "cannot find %s directory" % makedir("bin")
    assert os.path.exists(makedir("rbin")), \
        "cannot find %s directory" % makedir("rbin")
    env = Env()
    env.add_search_path("PYTHONPATH", os.getcwd())

    test_list = [t for t in filter_list('tests/*', options.run_test)
                 if os.path.isdir(t)]

    # Various files needed or created by the testsuite
    result_dir = options.output_dir
    results_file = result_dir + '/results'

    if os.path.exists(result_dir):
        rm(result_dir, True)

    mkdir(result_dir)

    discs = env.discriminants

    if options.discs:
        discs += options.discs

    def test_build_cmd(test, _):
        """Run the given test"""
        cmd = [sys.executable, 'run-test',
               '-d', ",".join(discs),
               '-o', result_dir,
               '-t', options.tmp,
               test]
        if options.verbose:
            cmd.append('-v')
        if options.host:
            cmd.append('--host=' + options.host)
        if options.target:
            cmd.append('--target=' + options.target)
        if not options.enable_cleanup:
            cmd.append('--disable-cleanup')
        return Run(cmd, bg=True)

    collect_result = generate_collect_result(
        result_dir, results_file, options.view_diffs)

    MainLoop(test_list, test_build_cmd, collect_result, options.mainloop_jobs)

    # Write report
    with open(result_dir + '/discs', 'w') as discs_f:
        discs_f.write(" ".join(discs))
    ReportDiff(result_dir, options.old_result_dir).txt_image(
        result_dir + '/report')
Example #7
def main():
    """Run the testsuite"""
    options = __parse_options()
    assert os.path.exists(makedir("bin")), \
        "cannot find %s directory" % makedir("bin")
    assert os.path.exists(makedir("rbin")), \
        "cannot find %s directory" % makedir("rbin")
    env = Env()
    env.add_search_path("PYTHONPATH", os.getcwd())

    test_list = [
        t for t in filter_list('tests/*', options.run_test) if os.path.isdir(t)
    ]

    # Various files needed or created by the testsuite
    result_dir = options.output_dir
    results_file = result_dir + '/results'

    if os.path.exists(result_dir):
        rm(result_dir, True)

    mkdir(result_dir)

    discs = env.discriminants

    if options.discs:
        discs += options.discs

    def test_build_cmd(test, _):
        """Run the given test"""
        cmd = [
            sys.executable, 'run-test', '-d', ",".join(discs), '-o',
            result_dir, '-t', options.tmp, test
        ]
        if options.verbose:
            cmd.append('-v')
        if options.host:
            cmd.append('--host=' + options.host)
        if options.target:
            cmd.append('--target=' + options.target)
        if not options.enable_cleanup:
            cmd.append('--disable-cleanup')
        return Run(cmd, bg=True)

    collect_result = generate_collect_result(result_dir, results_file,
                                             options.view_diffs)

    MainLoop(test_list, test_build_cmd, collect_result, options.mainloop_jobs)

    # Write report
    with open(result_dir + '/discs', 'w') as discs_f:
        discs_f.write(" ".join(discs))
    ReportDiff(result_dir,
               options.old_result_dir).txt_image(result_dir + '/report')
Example #8
 def create(self, output_dir):
     mkdir(output_dir)
     executable = os.path.join(output_dir, self.name)
     if Env().build.os.name == 'windows':
         # On Windows, executables are suffixed with .exe
         executable += '.exe'
     with open(executable, 'w') as fd:
         fd.write(self._generate_code() % {
             'project_name': self.project.name,
             'project_root': self.project.install_dir
         })
         fd.write('\n')
     chmod('+x', executable)
Example #9
 def create(self, output_dir):
     mkdir(output_dir)
     executable = os.path.join(output_dir, self.name)
     if Env().build.os.name == 'windows':
         # On Windows, executables are suffixed with .exe
         executable += '.exe'
     with open(executable, 'w') as fd:
         fd.write(self._generate_code() % {
             'project_name': self.project.name,
             'project_root': self.project.install_dir
         })
         fd.write('\n')
     chmod('+x', executable)
Example #10
    def setup_result_dir(self):
        """Create the output directory in which the results are stored."""
        if os.path.isdir(self.old_output_dir):
            rm(self.old_output_dir, True)
        if os.path.isdir(self.output_dir):
            mv(self.output_dir, self.old_output_dir)
        mkdir(self.output_dir)

        if self.main.options.dump_environ:
            with open(os.path.join(self.output_dir, 'environ.sh'), 'w') as f:
                for var_name in sorted(os.environ):
                    f.write('export %s=%s\n' %
                            (var_name, quote_arg(os.environ[var_name])))
Example #11
    def prepare_working_space(self):
        """Prepare working space.

        Set the working space in self.work_dir. This resets the working
        directory and copies the test into <work_dir>/src. This
        directory can be used to hold temp files as it will be
        automatically deleted at the end of the test by the clean method
        """
        # At this stage we know the test will be executed, so start by
        # copying the test sources into a temporary location.
        rm(self.work_dir, True)
        mkdir(self.work_dir)
        try:
            shutil.copytree(self.test, self.work_dir + '/src')
        except shutil.Error:
            print >> sys.stderr, "Error when copying %s in %s" % (
                self.test, self.work_dir + '/src')
Example #12
    def __qm_build (self, part):
        """Build one PART of using the Qualifying Machine."""

        announce ("building %s %s" % (self.this_docformat, part.upper()))

        os.chdir (
            os.path.join (self.repodir, "qualification", "qm")
            )
        
        # The qmachine model might use the "build" directory as
        # a repository, and it has to preexist:

        mkdir ("build")
        run ("qmachine model.xml -l scripts/generate_%s_%s.py" \
                 % (part, self.this_docformat),
             env={'GENBUNDLE_DOLEVEL': self.o.dolevel})

        self.__latch_into (
                dir=self.itemsdir(), part=part, toplevel=False)
Example #13
    def build_kit (self):
        announce ("building %s kit" % self.this_docformat)

        os.chdir (self.workdir)

        # The kit name is computed as:
        #
        #    gnatcov-qualkit-<kitid>-<YYYYMMDD>
        #
        # where <YYYYMMDD> is the kit production stamp (now), and <kitid> is
        # computed from the git branch off which the artifacts are taken. The
        # git branch name might contain the "qualkit" indication already.
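        #
        # For instance (illustrative values only), a branch named "qual/21.2"
        # would yield kitid "qual-21_2" and hence a kit name like
        # "gnatcov-qualkit-qual-21_2-<YYYYMMDD>".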

        today = date.today()
        gitbranch = current_gitbranch_at(self.repodir)

        kitprefix = (
            "gnatcov-qualkit" if "qualkit" not in gitbranch
            else "gnatcov"
            )

        kitid = gitbranch
        kitid = kitid.replace('/', '-')
        kitid = kitid.replace('.', '_')

        # If we are re-constructing a kit with some parts just rebuilt, target
        # the specified version (stamp) and arrange to keep the old elements
        # in place:

        kitstamp = (
            self.o.rekit if self.o.rekit
            else "%4d%02d%02d" % (today.year, today.month, today.day)
            )
        kitname = "%s-%s-%s" % (kitprefix, kitid, kitstamp)             
        kitdir = "%s-%s" % (kitname, self.this_docformat)

        mkdir (kitdir)

        [self.__relocate_into (dir=kitdir, part=part) for part in self.o.parts]

        run ("zip -q -r %(kitdir)s.zip %(kitdir)s" % {"kitdir": kitdir})
Example #14
    def _compile(self, test_driver, compile_unit_switches):
        mkdir('{}-obj'.format(test_driver))

        project_file = gprfor(
            mains=[test_driver + '.c'],
            prjid=test_driver,
            srcdirs=['..'] + self.extra_sourcedirs,
            objdir='{}-obj'.format(test_driver),
            langs=['C', 'ASM'],
            compiler_extra='\n'.join(
                ('for Switches("{}") use '
                 ' Compiler\'Default_Switches ("C") & ({});').format(
                    cu, self.fmt_list(switches)
                )
                for cu, switches in compile_unit_switches.iteritems()
            )
        )

    # We never want the testsuite optimization options or source coverage
        # options to interfere with object coverage testcases as these are very
        # sensitive to code generation.
        gprbuild(project_file, scovcargs=False, suitecargs=False)
Example #15
# build directory (so not on the Python search path).
config = imp.load_source('config', os.path.join(BASE_DIR, 'tests',
                                                'config.py'))

# Shared configuration files are in tests/conf
CONF_DIR = os.path.join(SRC_DIR, 'tests', 'confs')

EXE_EXT = Env().target.os.exeext

OUTPUT_FILENAME = os.path.join(Env().log_dir, TEST_NAME)

USE_INSTALLED = config.use_installed == "yes"

try:
    if not os.path.isdir(os.path.dirname(OUTPUT_FILENAME)):
        mkdir(os.path.dirname(OUTPUT_FILENAME))
except FileUtilsError:
    # Ignore errors, multiple tests can be run in parallel
    pass


def assert_exists(filename):
    """Assert that the given filename exists"""
    assert os.path.exists(filename), "%s not found" % filename


def terminate(handle):
    """Terminate safely a process spawned using Popen"""

    if sys.platform.startswith('win'):
        try:
Example #16
def main():
    """Run the testsuite and generate reports"""
    # Parse the command lines options
    m = Main(add_targets_options=True)
    add_mainloop_options(m)
    add_run_test_options(m)
    m.add_option('--diffs',
                 dest='diffs',
                 action='store_true',
                 default=False,
                 help='show diffs on stdout')
    m.add_option("--old-result-dir",
                 type="string",
                 default=None,
                 help="Old result dir (to generate the report)")
    m.add_option('-b',
                 '--build-dir',
                 dest='build_dir',
                 help='separate PolyORB build directory')
    m.add_option('--testsuite-src-dir',
                 dest='testsuite_src_dir',
                 help='path to polyorb testsuite sources')
    m.add_option('--coverage',
                 dest='coverage',
                 action='store_true',
                 default=False,
                 help='generate coverage information')
    m.parse_args()

    # Various files needed or created by the testsuite
    results_file = m.options.output_dir + '/results'
    report_file = m.options.output_dir + '/report'

    if not m.options.failed_only:
        rm(m.options.output_dir, True)
        mkdir(m.options.output_dir)

    # Add current directory in PYTHONPATH (to find test_utils.py)
    env = Env()
    env.add_search_path('PYTHONPATH', os.path.join(os.getcwd(), 'tests'))
    fixed_support_dir = os.path.join(os.getcwd(), 'fixed_support_dir')
    env.add_search_path('FIXED_SUPPORT_DIR', fixed_support_dir)
    env.add_path(os.path.join(fixed_support_dir))
    env.add_path('.')  # many tests expect '.' in the PATH

    # Avoid extra debug traces
    os.environ['POLYORB_LOG_DEFAULT'] = 'error'

    # Generate the discs list for test.opt parsing
    # Always add 'ALL'
    common_discs = Env().discriminants

    # Be backward compatible with the old IDL tests
    # Set the polyorb discriminant and export the IDLCOMP
    # environment variable.
    common_discs.append('PolyORB')
    common_discs.append('PolyORB_IAC')
    os.environ['IDLCOMP'] = 'iac'

    # Retrieve also the polyorb specific discriminants
    p = Run([
        which('bash'),
        which('polyorb-config').replace('\\', '/'), '--config'
    ])
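
    # Illustrative values only: the regexes below expect `polyorb-config
    # --config` output lines of the form
    #
    #   Application personalities : corba
    #   Protocol personalities    : giop
    #   Services                  : naming
    #   SSL support               : yes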

    # First find the supported application personalities.
    match = re.search('Application *personalities *: (.+)', p.out)
    if match is not None:
        common_discs += ['app_%s' % k for k in match.group(1).split()]

    # Then the supported protocols
    match = re.search('Protocol *personalities *: (.+)', p.out)
    if match is not None:
        common_discs += ['proto_%s' % k for k in match.group(1).split()]

    # Then the supported services
    match = re.search('Services *: (.+)', p.out)
    if match is not None:
        common_discs += ['serv_%s' % k for k in match.group(1).split()]

    # Do we have SSL support?
    if re.search('SSL *support *: *yes', p.out):
        common_discs.append('ssl_support')

    with open(m.options.output_dir + '/discs', 'w') as f_disk:
        f_disk.write(", ".join(common_discs))

    # Expand ~ and ~user constructions in user-provided paths
    if m.options.build_dir is None:
        m.options.build_dir = os.path.join(os.getcwd(), os.pardir)
    else:
        m.options.build_dir = os.path.expanduser(m.options.build_dir)

    if m.options.testsuite_src_dir is None:
        m.options.testsuite_src_dir = os.path.join(os.getcwd())
    else:
        m.options.testsuite_src_dir = os.path.expanduser(
            m.options.testsuite_src_dir)

    # Compute the test list
    if m.args:
        test_glob = m.args[0]
    else:
        test_glob = None
    test_list = filter_list('./tests/*/*/*/test.py', test_glob)
    if os.path.isdir('regtests'):
        test_list.extend(filter_list('./regtests/*/test.*', test_glob))

    collect_result = generate_collect_result(m.options.output_dir,
                                             results_file, m.options.diffs)
    run_testcase = generate_run_testcase('tests/run-test.py', common_discs,
                                         m.options)

    os.environ['TEST_CONFIG'] = os.path.join(os.getcwd(), 'env.dump')
    env.options = m.options
    env.log_dir = os.path.join(os.getcwd(), 'log')
    env.store(os.environ['TEST_CONFIG'])

    if len(test_list) == 0:
        logger.error("No matching test found")
        return

    MainLoop(test_list, run_testcase, collect_result, m.options.mainloop_jobs)

    # Generate the report file
    ReportDiff(m.options.output_dir,
               m.options.old_result_dir).txt_image(report_file)
Example #17
def create_fake_ada_compiler(comp_dir,
                             comp_target,
                             gnat_version,
                             gcc_version,
                             comp_is_cross=False,
                             runtimes=["native", "sjlj"],
                             create_symlink=False,
                             create_ada_object_path=False):
    """
       Create directory defined by the comp_dir parameter and put fake Ada
       compiler directory tree there. If comp_is_cross is true, the compiler
       tools 'gnatmake', 'gcc', and 'gnatls' will be prefixed by the
       comp_target. If create_symlink is true, the first runtime from the
       runtimes will be made available as default through an 'adalib' symbolic
       link.
       If create_ada_object_path is true, that file will be created to simulate
       a Windows install.
    """

    if comp_is_cross:
        comp_prefix = comp_target + '-'
    else:
        comp_prefix = ""

    arch = Arch()
    comp_dict = {
        'comp_target': comp_target,
        'gnat_version': gnat_version,
        'gcc_version': gcc_version,
        'comp_prefix': comp_prefix,
        'exeext': arch.os.exeext
    }

    mkdir(os.path.join(comp_dir, 'bin'))
    gnatls_adb = open(os.path.join(comp_dir, 'bin', 'gnatls.adb'), 'w')
    gnatls_adb.write("""
with Ada.Text_IO; use Ada.Text_IO;
with Ada.Command_Line; use Ada.Command_Line;
procedure gnatls is
begin
   if Argument_Count >= 1 and then Argument (1) = "-v" then
        Put_Line ("GNATLS Pro %(gnat_version)s (20190507-89)");
   else
         Put ("Running gnatls");
         for J in 1 .. Argument_Count loop
             Put (" " & Argument (J));
         end loop;
   end if;
end gnatls;
""" % comp_dict)
    gnatls_adb.close()

    gcc_adb = open(os.path.join(comp_dir, 'bin', 'gcc.adb'), 'w')
    gcc_adb.write("""
with Ada.Text_IO; use Ada.Text_IO;
with Ada.Command_Line; use Ada.Command_Line;
procedure gcc is
begin
   if Argument_Count >= 1 and then Argument (1) = "-v" then
        Put_Line ("gcc version %(gcc_version)s 20131008 for GNAT Pro");
   elsif Argument_Count >= 1 and then Argument (1) = "--version" then
        Put_Line ("gcc (GCC) %(gcc_version)s");
   elsif Argument_Count >= 1 and then Argument (1) = "-dumpmachine" then
        Put_Line ("%(comp_target)s");
   else
         Put ("Running gcc");
         for J in 1 .. Argument_Count loop
             Put (" " & Argument (J));
         end loop;
   end if;
end gcc;
""" % comp_dict)
    gcc_adb.close()

    gnatmake_adb = open(os.path.join(comp_dir, 'bin', 'gnatmake.adb'), 'w')
    gnatmake_adb.write("""
with Ada.Text_IO; use Ada.Text_IO;
with Ada.Command_Line; use Ada.Command_Line;
procedure gnatmake is
begin
         Put ("Running gcc");
         for J in 1 .. Argument_Count loop
             Put (" " & Argument (J));
         end loop;
end gnatmake;
""")
    gnatmake_adb.close()

    for tool in ['gnatmake', 'gcc', 'gnatls']:
        comp_dict['bin'] = tool

        # Do not run gnatmake in the same directory as the fake tool sources,
        # to avoid picking up the just-created fake tools during the build.

        Run([
            'gnatmake',
            os.path.join('bin', tool + '.adb'), '-o',
            os.path.join('bin', '%(comp_prefix)s%(bin)s%(exeext)s' % comp_dict)
        ],
            cwd=comp_dir)

    if comp_target == "dotnet":
        for dir in ("adalib", "adainclude"):
            mkdir(os.path.join(comp_dir, 'lib', 'dotgnat', dir))
    else:
        for runtime in runtimes:
            for dir in ("adalib", "adainclude"):
                mkdir(
                    os.path.join(comp_dir, 'lib', 'gcc', comp_target,
                                 gcc_version, 'rts-%s' % runtime, dir))

    libdir = os.path.join(comp_dir, 'lib', 'gcc', comp_target, gcc_version)

    # On Unix systems, we have a symbolic link for the default
    # runtime. gprconfig should automatically detect that the two
    # entries refer to the same runtime and only list "native".

    if create_symlink:
        os.symlink(os.path.join('rts-%s' % runtimes[0], 'adalib'),
                   os.path.join(libdir, 'adalib'))

    # Simulate windows system, with an ada_object_path file

    if create_ada_object_path:
        with open(os.path.join(libdir, 'ada_object_path'), 'w') as ada_obj:
            ada_obj.write("rts-%s/adalib" % runtimes[0])
Example #18
 def to_subdir(self, dir):
     self.to_homedir()
     mkdir(dir)
     cd(dir)
Example #19
def parse_scenario(filename):
    """Parse a scenario file and create the corresponding test directories"""
    scenario = open(filename)

    test_dict = {}
    current_section = SCENARIO_SECTION
    current_test = ""

    for line in scenario:
        if line.startswith('['):
            test_name = re.match(r'\[(.*) (.*)\]', line)
            if test_name:
                current_section = test_name.group(1)
                current_test = test_name.group(2)
                if current_section == TEST_SECTION:
                    test_dict[current_test] = {}

        elif current_section != SCENARIO_SECTION:
            # Do not parse scenario section.
            line_def = re.match(r'(.*)=(.*)', line)
            if line_def:
                left = line_def.group(1)
                right = line_def.group(2)
                if current_section not in test_dict[current_test]:
                    test_dict[current_test][current_section] = {}
                test_dict[current_test][current_section][left] = right

    scenario.close()
    full_name = os.path.basename(filename)
    sep = full_name.find('-')

    parent_dir = full_name[:sep]
    mkdir(parent_dir)

    scenario_dir = full_name[sep + 1:-5]
    mkdir(os.path.join(parent_dir, scenario_dir))
    cd(os.path.join(parent_dir, scenario_dir))

    for test_name in test_dict:
        test_type = test_dict[test_name][TEST_SECTION]['type']
        if test_type == 'client_server':
            mkdir(test_name)
            f = open(os.path.join(test_name, 'test.py'), 'w')
            f.write(
                CLIENT_SERVER_TEMPLATE % {
                    'client_cmd':
                    test_dict[test_name][CLIENT_SECTION]['command'],
                    'client_conf':
                    test_dict[test_name][CLIENT_SECTION].get(
                        'config_file', ''),
                    'server_cmd':
                    test_dict[test_name][SERVER_SECTION]['command'],
                    'server_conf':
                    test_dict[test_name][SERVER_SECTION].get(
                        'config_file', ''),
                })
            f.close()

            if 'expected_failure' in test_dict[test_name][TEST_SECTION]:
                f = open(os.path.join(test_name, 'test.opt'), 'w')
                f.write('ALL XFAIL\n')
                f.close()

        elif test_type == 'local':
            mkdir(test_name)
            f = open(os.path.join(test_name, 'test.py'), 'w')
            f.write(
                LOCAL_TEMPLATE % {
                    'command':
                    test_dict[test_name][TEST_SECTION]['command'],
                    'conf':
                    test_dict[test_name][TEST_SECTION].get('config_file', '')
                })
            f.close()

            if 'expected_failure' in test_dict[test_name][TEST_SECTION]:
                f = open(os.path.join(test_name, 'test.opt'), 'w')
                f.write('ALL XFAIL\n')
                f.close()
        else:
            print 'unknown type for test: ' + test_name
Example #20
    def setup_workdir (self):

        announce ("setting up working dir at %s" % self.workdir)

        mkdir (self.workdir)
Example #21
    def __init__(self,
                 test,
                 discs,
                 result_dir,
                 temp_dir=Env().tmp_dir,
                 enable_cleanup=True,
                 restricted_discs=None,
                 test_args=None,
                 failed_only=False,
                 default_timeout=780,
                 use_basename=True):
        """TestRunner constructor.

        :param test: location of the test
        :type test: str
        :param discs: list of discriminants
        :type discs: list[str]
        :param result_dir: directory in which results will be stored
        :type result_dir: str
        :param temp_dir: temporary directory used during test run
        :type temp_dir: str
        :param enable_cleanup: whether the temporary files need to be removed
        :type enable_cleanup: bool
        :param restricted_discs: None or a list of discriminants
        :type restricted_discs:  list[str] | None
        :param test_args: ???
        :param failed_only: run failed only
        :type failed_only: bool
        :param default_timeout: timeout when executing a test
        :type default_timeout: int
        :param use_basename: if True, use the test basename as the test name;
            otherwise use the relative path
        :type use_basename: bool
        """
        self.test = test.rstrip('/')
        self.discs = discs
        self.cmd_line = None
        self.test_args = test_args
        self.enable_cleanup = enable_cleanup
        self.restricted_discs = restricted_discs
        self.skip = False  # if True, do not run execute()

        # Test name
        if use_basename:
            self.test_name = os.path.basename(self.test)
        else:
            self.test_name = os.path.relpath(self.test, os.getcwd())

        # Prefix of files holding the test result
        self.result_prefix = result_dir + '/' + self.test_name

        mkdir(os.path.dirname(self.result_prefix))

        # Temp directory in which the test will be run
        self.work_dir = os.path.realpath(
            os.path.join(temp_dir,
                         'tmp-test-%s-%d' % (self.test_name, os.getpid())))
        self.output = self.work_dir + '/tmpout'
        self.output_filtered = self.work_dir + '/tmpout.filtered'
        self.diff_output = self.work_dir + '/diff'
        self.cmdlog = self.work_dir + '/' + self.test_name + '.log'

        # Initial test status
        self.result = {'result': 'UNKNOWN', 'msg': '', 'is_failure': True}

        # Some tests save the pids of spawned background processes in
        # work_dir/.pids. The TEST_WORK_DIR environment variable is used to
        # pass the working directory location.
        os.environ['TEST_WORK_DIR'] = self.work_dir

        if failed_only:
            # Read old result now
            previous_result = self.read_result()
            if previous_result in IS_STATUS_FAILURE \
                    and not IS_STATUS_FAILURE[previous_result]:
                # We don't need to run this test. Return now
                self.skip = True
                return

        # Make sure we start from a sane environment
        rm(self.result_prefix + '.result')
        rm(self.result_prefix + '.out')
        rm(self.result_prefix + '.expected')
        rm(self.result_prefix + '.diff')
        rm(self.result_prefix + '.log')
        rm(self.result_prefix + '.out.filtered')

        # Initialize options defaults (can be modified with test.opt).
        # By default a test is not DEAD, SKIP nor XFAIL. Its maximum execution
        # time is 780s. Test script is test.cmd and output is compared against
        # test.out.
        self.opt_results = {
            'RLIMIT': str(default_timeout),
            'DEAD': None,
            'XFAIL': False,
            'SKIP': None,
            'OUT': 'test.out',
            'CMD': 'test.cmd',
            'FILESIZE_LIMIT': None,
            'TIMING': None,
            'NOTE': None
        }
        self.opt_file = 'test.opt'

        # test.cmd has priority; if not found, use test.py
        if not os.path.isfile(self.test +
                              '/test.cmd') and os.path.isfile(self.test +
                                                              '/test.py'):
            self.opt_results['CMD'] = 'test.py'