Example #1
def version(tool, nlines=1):
    """
    Return version information as reported by the execution of TOOL --version,
    expected on the first NLINES of output. If TOOL is not available from PATH,
    return a version text indicating unavailability.  If TOOL is 'gcc', append
    the target for which it was configured to the base version info.
    """

    # If TOOL is not on PATH, return a version text indicating unavailability.
    # This situation is legitimate here for gnatemu when running through a
    # probe, and if we happen to actually need the tool later on, we'll see
    # test failures anyway.
    if not which(tool):
        return 'N/A'

    # --version often dumps more than the version number on a line. A
    # copyright notice is typically found there as well. Our heuristic
    # here is to strip everything past the first comma.

    def version_on_line(text):
        cprpos = text.find(',')
        return text[0:cprpos] if cprpos != -1 else text

    tool_version_output = Run([tool, '--version']).out.split('\n')
    version_info = '\n'.join(
        [version_on_line(line) for line in tool_version_output[0:nlines]])

    if tool == 'gcc':
        gcc_target = Run([tool, '-dumpmachine']).out.strip()
        version_info += ' [%s]' % gcc_target

    return version_info
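
A small illustration of the comma heuristic implemented by version_on_line above; the sample version strings are made up for the example:

def strip_after_first_comma(text):
    # Keep only what precedes the first comma, where a copyright notice
    # typically starts; return the text unchanged when there is no comma.
    pos = text.find(',')
    return text[:pos] if pos != -1 else text


print(strip_after_first_comma('sometool 1.2.3, Copyright (C) 2024 Example'))  # 'sometool 1.2.3'
print(strip_after_first_comma('sometool 1.2.3'))                              # unchanged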
Example #2
    def run_postinstall(self):
        """Run cygwin postinstall scripts"""
        with open(self.cyglog('postinstall.log'), 'wb') as fd:
            os.chdir(self.root_dir)

            # Compute the list of postinstall scripts
            pscripts = []
            for ext in ('.sh', '.bat', '.cmd', '.dash'):
                pscripts += ls('etc/postinstall/*' + ext)
            pscripts.sort()

            # Set some env variables needed by the postinstall scripts
            os.environ['SHELL'] = '/bin/bash'
            os.environ['CYGWINROOT'] = self.root_dir
            os.environ['TERM'] = 'dumb'
            for p in (('usr', 'bin'), ('bin', ), ('usr', 'sbin'), ('sbin', )):
                os.environ['PATH'] = os.path.join(self.root_dir, *p) + ';' + \
                    os.environ['PATH']

            # run postinstall scripts
            for index, ps in enumerate(pscripts, 1):
                logging.info('Run postinstall (%s/%s) %s' %
                             (index, len(pscripts), os.path.basename(ps)))
                fd.write('run %s\n' % ps)

                if ps.endswith('.dash'):
                    Run([os.path.join(self.root_dir, 'bin', 'dash.exe'), ps],
                        output=fd)
                elif ps.endswith('.sh'):
                    Run([os.path.join(self.root_dir, 'bin', 'bash.exe'),
                         '--norc', '--noprofile', ps], output=fd)
                mv(ps, ps + '.done')
Example #3
    def run(self) -> None:
        """Run the job."""
        cmd_options = self.cmd_options

        # Do a non-blocking spawn followed by a wait in order to have
        # self.proc_handle set. This allows support for interrupts.
        cmd_options["bg"] = True
        with self.lock:
            if self.interrupted:  # defensive code
                logger.debug("job %s has been cancelled", self.uid)
                return
            try:
                cmdline = self.cmdline
                assert cmdline is not None, "cmdline cannot be None"

                proc_handle = Run(cmdline, **cmd_options)
                self.proc_handle = proc_handle
            except Exception:
                logger.exception("error when spawning job %s", self.uid)
                self.__spawn_error = True
                return
        proc_handle.wait()
        logger.debug(
            "job %s status %s (pid:%s)", self.uid, proc_handle.status, proc_handle.pid
        )
Example #4
    def run(self):
        """Run the job."""
        cmd_options = self.cmd_options

        # Do a non-blocking spawn followed by a wait in order to have
        # self.proc_handle set. This allows support for interrupts.
        cmd_options['bg'] = True
        self.proc_handle = Run(self.cmdline, **cmd_options)
        self.proc_handle.wait()
Example #5
class ProcessJob(Job):
    """Specialized version of Job that spawns processes."""

    __metaclass__ = abc.ABCMeta

    def __init__(self, uid, data, notify_end):
        super(ProcessJob, self).__init__(uid, data, notify_end)
        self.proc_handle = None

    def run(self):
        """Run the job."""
        cmd_options = self.cmd_options

        # Do a non-blocking spawn followed by a wait in order to have
        # self.proc_handle set. This allows support for interrupts.
        cmd_options['bg'] = True
        self.proc_handle = Run(self.cmdline, **cmd_options)
        self.proc_handle.wait()

    @abc.abstractproperty
    def cmdline(self):
        """Return the command line of the process to be spawned.

        :return: the command line
        :rtype: list[str]
        """
        pass

    @property
    def cmd_options(self):
        """Process options.

        Important note: don't use PIPE for the output or error parameters;
        this can cause a locking error if the process is interrupted. The
        default redirects output and error to the console.

        The pipe behavior can easily be emulated by writing to a file and
        modifying the run method to read the file content when the process
        finishes.

        :return: options for e3.os.process.Run as a dict
        :rtype: dict
        """
        return {'output': None}

    def interrupt(self):
        """Kill running process tree."""
        if hasattr(self, 'proc_handle') and \
                self.proc_handle and \
                self.proc_handle.is_running():
            logger.debug('interrupt job %s', self.uid)
            self.proc_handle.kill(recursive=True)
            self.interrupted = True
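
The cmd_options docstring above suggests emulating PIPE by writing to a file and reading it back once the process finishes. A minimal sketch of that idea, assuming a hypothetical FileOutputJob subclass (a concrete subclass would still need to define cmdline):

import os
import tempfile


class FileOutputJob(ProcessJob):
    """Hypothetical ProcessJob subclass capturing output in a file."""

    @property
    def cmd_options(self):
        # Redirect the process output to a temporary file instead of a PIPE,
        # so an interrupted process cannot block on a full pipe buffer.
        fd, self.out_file = tempfile.mkstemp(suffix='.out')
        os.close(fd)
        return {'output': self.out_file}

    def run(self):
        super(FileOutputJob, self).run()
        # Emulate the PIPE behavior: read the captured output once the
        # process has finished.
        with open(self.out_file) as f:
            self.out = f.read()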
Example #6
def make_gnatcoll(work_dir, gcov=False):
    """Build gnatcoll core with or without gcov instrumentation.

    :param work_dir: working directory. gnatcoll is built in `build` subdir
        and installed in `install` subdir
    :type work_dir: str
    :param gcov: if False, build gnatcoll in PROD mode; otherwise build it
        with gcov instrumentation in DEBUG mode
    :type gcov: bool
    :return: a triplet (project path, source path, object path)
    :rtype: (str, str, str)
    :raise AssertionError: in case compilation or installation fails
    """

    # Create build tree structure
    build_dir = os.path.join(work_dir, 'build')
    install_dir = os.path.join(work_dir, 'install')
    mkdir(build_dir)
    mkdir(install_dir)

    # Compute make invocation
    for binding in ('gmp', 'iconv', 'python'):
        logging.info('Compiling gnatcoll %s (gcov=%s)', binding, gcov)
        setup = os.path.join(GNATCOLL_ROOT_DIR, binding, 'setup.py')
        obj_dir = os.path.join(build_dir, binding)
        mkdir(obj_dir)

        build_cmd = [sys.executable, setup, 'build', '--disable-shared']
        install_cmd = [sys.executable, setup, 'install',
                       '--prefix', install_dir]

        if gcov:
            build_cmd += ['--gpr-opts', '-cargs', '-fprofile-arcs',
                          '-ftest-coverage',
                          '-largs', '-lgcov',
                          '-gargs', '-XBUILD=DEBUG']
        else:
            build_cmd += ['--gpr-opts', '-XBUILD=PROD']

        # Build & Install
        p = Run(build_cmd, cwd=obj_dir)
        assert p.status == 0, \
            "gnatcoll %s build failed:\n%s" % (binding, p.out)
        logging.debug('build:\n%s', p.out)

        p = Run(install_cmd, cwd=obj_dir)
        assert p.status == 0, \
            "gnatcoll %s installation failed:\n%s" % (binding, p.out)
        logging.debug('install:\n%s', p.out)

    return (os.path.join(install_dir, 'share', 'gpr'),
            os.path.join(install_dir, 'include'),
            build_dir)
Example #7
    def run(self):
        """Launch the UxAS instance.

        :return: the process object
        :rtype: e3.os.process.Run
        """
        with closing(tempfile.NamedTemporaryFile(mode='wb',
                                                 delete=False)) as fd:
            self.cfg_path = fd.name
            fd.write(self.cfg.dump())

        self.process = Run([self.uxas_bin, '-cfgPath', self.cfg_path],
                           output=None,
                           bg=True)
        return self.process
Example #8
def make_gnatcoll_for_gcov(work_dir, components):
    """Build gnatcoll core with gcov instrumentation.

    :param work_dir: working directory. gnatcoll is built in `build` subdir
        and installed in `install` subdir
    :type work_dir: str
    :param components: gnatcoll components to build and install
    :type components: list[str]
    :return: a triplet (project path, source path, object path)
    :rtype: (str, str, str)
    :raise AssertionError: in case compilation or installation fails
    """
    logging.info('Compiling gnatcoll with gcov instrumentation')
    build_dir = os.path.join(work_dir, 'build')
    install_dir = os.path.join(work_dir, 'install')
    mkdir(build_dir)
    mkdir(install_dir)

    # Add the resulting library into the GPR path
    Env().add_search_path('GPR_PROJECT_PATH',
                          os.path.join(install_dir, 'share', 'gpr'))
    Env().add_path(os.path.join(install_dir, 'bin'))

    for component in components:
        logging.info('Compiling: %s', component)
        gcov_options = '-cargs -fprofile-arcs -ftest-coverage -gargs'
        component_dir = COMPONENT_PROPERTIES[component].get(
            'component', component)

        if COMPONENT_PROPERTIES[component].get('is_bin'):
            gcov_options += ' -largs -lgcov -gargs'

        make_gnatcoll_cmd = [
            'make', '-f',
            os.path.join(GNATCOLL_ROOT_DIR, component_dir, 'Makefile'),
            'BUILD=DEBUG',
            'GPRBUILD_OPTIONS=%s' % gcov_options,
            'ENABLE_SHARED=no'] + \
            COMPONENT_PROPERTIES[component].get('make_args', [])

        p = Run(make_gnatcoll_cmd, cwd=build_dir)
        assert p.status == 0, "gnatcoll build failed:\n%s" % p.out

        p = Run(make_gnatcoll_cmd + ['prefix=%s' % install_dir, 'install'],
                cwd=build_dir)
        assert p.status == 0, "gnatcoll installation failed:\n%s" % p.out

    return (os.path.join(install_dir, 'share', 'gpr'),
            os.path.join(install_dir, 'include'),
            os.path.join(build_dir, 'obj', 'static'))
Example #9
def main():
    """Run e3-test script."""
    m = Main()

    # Ignore arguments here as they are arguments for the actual testsuite
    m.parse_args(known_args_only=True)

    # Find first the tool configuration file. Keep track of current directory
    # that will be used to select the test subset automatically.
    cwd = os.path.abspath(os.getcwd())
    root_dir = cwd
    while not os.path.isfile(os.path.join(root_dir, 'e3-test.yaml')):
        new_root_dir = os.path.dirname(root_dir)
        if new_root_dir == root_dir:
            logging.error("cannot find e3-test.yaml")
            return 1
        root_dir = new_root_dir
    config_file = os.path.join(root_dir, 'e3-test.yaml')

    with open(config_file, 'rb') as fd:
        config = yaml.safe_load(fd)

    if 'main' not in config:
        logging.error('cannot find testsuite main')
        return 1
    p = Run([
        sys.executable,
        os.path.join(root_dir, config['main']),
        os.path.relpath(cwd, root_dir) + '/'
    ] + config.get('default_args', []),
            output=None,
            cwd=root_dir)
    return p.status
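
main() above reads two keys from e3-test.yaml: 'main' (mandatory) and 'default_args' (optional). A minimal sketch of such a configuration file, generated from Python for illustration; the values are hypothetical:

import yaml

config = {
    'main': 'testsuite.py',                   # hypothetical testsuite entry point
    'default_args': ['--show-error-output'],  # hypothetical default arguments
}
with open('e3-test.yaml', 'w') as f:
    yaml.safe_dump(config, f)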
Example #10
    def simple_run(
        self,
        cmd,
        env=None,
        catch_error=True,
        output=PIPE,
        error=STDOUT,
        analyze_output=True,
    ):
        """Generic TestDriver.shell or e3.os.process.Run runner."""
        if self.driver is not None:
            # Build the effective environment for the driver: inherit the
            # current environment and apply the requested overrides, if any.
            effective_env = None
            if env is not None:
                effective_env = dict(os.environ)
                effective_env.update(env)
            return self.driver.shell(
                cmd,
                env=effective_env,
                catch_error=catch_error,
                analyze_output=analyze_output,
            )
        else:
            p = Run(cmd, env=env, output=output, error=error, ignore_environ=False)
            if catch_error and p.status != 0:
                print(str(cmd) + " returned " + str(p.status))
                print("stdout\n" + p.out)
                print("stderr\n" + p.err)
                raise TestAbortWithFailure("non-zero exit status")
            else:
                return p
Example #11
def gnatcov_run(driver, cmd, test_name=None, result=None, **kwargs):
    """
    Wrapper for `bin_check_call` that runs the process under
    "gnatcov run" and that produces a checkpoint in
    `driver.env.checkpoints_dir` for the corresponding partial coverage report.
    """
    test_name = test_name or driver.test_env['test_name']
    trace_file = os.path.join(driver.test_env['working_dir'],
                              '{}.trace'.format(test_name))
    checkpoint_file = os.path.join(driver.env.checkpoints_dir,
                                   '{}.ckpt'.format(test_name))

    cmd = ['gnatcov', 'run', '-o', trace_file, '-eargs'] + cmd
    result = bin_check_call(driver, cmd, test_name, result, **kwargs)

    p = Run(['gnatcov', 'coverage',
             '--level={}'.format(COVERAGE_LEVEL),
             '--scos=@{}'.format(driver.env.ali_files_list),
             '--save-checkpoint={}'.format(checkpoint_file),
             trace_file])
    if p.status:
        logging.error('converting gnatcov trace file to checkpoint failed:\n'
                      '{}'.format(p.out))

    return result
Example #12
def check_call(driver, cmd, test_name=None, result=None, **kwargs):
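    """Run cmd, record its output and status in the test result, and abort on failure."""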
    if 'cwd' not in kwargs and 'working_dir' in driver.test_env:
        kwargs['cwd'] = driver.test_env['working_dir']
    process = Run(cmd, **kwargs)
    if result is None:
        result = driver.result
    if test_name is None:
        test_name = driver.test_name
    result.processes.append({
        'output': Log(process.out),
        'status': process.status,
        'cmd': cmd,
        'run_args': kwargs
    })
    result.out += process.out

    # Append the status code and process output to the log to ease post-mortem
    # investigation.
    result.log += 'Status code: {}\n'.format(process.status)
    result.log += 'Output:\n'
    result.log += process.out

    if process.status != 0:
        result.set_status(TestStatus.FAIL, 'command call fails')
        driver.push_result(result)
        raise TestAbort
    return process
Example #13
def nm_symbols(binary):
    """Return a SymbolList from a binary file"""

    output = 'nm.out'
    error = 'nm.err'

    triplet = os.environ['target-triplet']
    args = [triplet + '-nm', binary]
    p = Run(args, output=output, error=error)

    if p.status != 0:
        print("nm failed:")
        print(contents_of(output))
        print(contents_of(error))

    result = SymbolList()
    for line in contents_of(output).splitlines():
        addr, typ, symbol = line.split(' ')

        if typ in [
                'A', 'B', 'C', 'D', 'd', 'G', 'g', 'R', 'r', 'S', 's', 'T', 't'
        ]:
            result.append(symbol, int('0x' + addr, base=16))

    return result
Example #14
    def __init__(self, num):
        """Start an Xvfb X11 server.

        PARAMETERS
          num: the display number
        """
        self.num = num

        with tempfile.NamedTemporaryFile(suffix="xvfb") as f:
            xvfb_file_name = f.name

        # Unset TMPDIR around the call to Xvfb, to work around a bug in
        # Ubuntu: see
        # bugs.launchpad.net/ubuntu/+source/xorg-server/+bug/972324
        old_tmpdir = None
        if "TMPDIR" in os.environ:
            old_tmpdir = os.environ["TMPDIR"]
            os.environ["TMPDIR"] = ""

        command = ["Xvfb", ":%s" % num, "-screen", "0", "1600x1200x24", "-ac"]

        self.xvfb_handle = Run(command,
                               bg=True,
                               output=xvfb_file_name,
                               error=STDOUT)

        if old_tmpdir is not None:
            os.environ["TMPDIR"] = old_tmpdir
Example #15
    def run_and_log(self, cmd, **kwargs):
        """
        Wrapper around e3.os.process.Run to log processes.

        Logging the processes that are run in each testcase is very useful
        for debugging.
        """

        # If code coverage is requested, give gnatcov a chance to decorate the
        # execution of the subprogram so that it contributes to code coverage.
        if self.env.gnatcov:
            kwargs = self.env.gnatcov.decorate_run(self, kwargs)

        process = Run(cmd, **kwargs)

        self.result.processes.append({
            'cmd': cmd,
            'run_args': kwargs,
            'status': process.status,
            'output': Log(process.out)
        })
        self.result.out += process.out

        return process
Example #16
    def create_data_dir(self, root_dir: str) -> None:
        """Create data to be pushed to the bucket used by CloudFormation for resources."""
        # Create directory specific to that lambda
        package_dir = os.path.join(root_dir, name_to_id(self.name), "package")

        # Install the requirements
        if self.requirement_file is not None:
            p = Run(
                python_script("pip")
                + ["install", f"--target={package_dir}", "-r", self.requirement_file],
                output=None,
            )
            assert p.status == 0

        # Copy user code
        self.populate_package_dir(package_dir=package_dir)

        # Create an archive
        create_archive(
            f"{self.name}_lambda.zip",
            from_dir=package_dir,
            dest=root_dir,
            no_root_dir=True,
        )

        # Remove temporary directory
        rm(package_dir, recursive=True)
Example #17
def produce_report(output_dir, checkpoint_list, src_dir):
    """Produce a coverage report.

    :param str output_dir: Name of the directory to contain the DHTML coverage
        report.
    :param str checkpoint_list: Name of the file that contains the list of
        checkpoints to use.
    :param str src_dir: Name of the directory that contains installed sources.
    """
    args = ['gnatcov', 'coverage', '--annotate=dhtml',
            '--level={}'.format(COVERAGE_LEVEL),
            '--output-dir={}'.format(output_dir),
            '--checkpoint=@{}'.format(checkpoint_list),

            # TODO: GNATcoverage is not able to find the source file for a
            # unit that is not used by any test program. This is a problem for
            # units that are not tested at all. Let it know where to find the
            # source file to avoid spurious warnings. Note that these units are
            # reported as uncovered in any case.
            '--source-search={}'.format(src_dir)]
    p = Run(args, output=None)
    if p.status:
        logging.error('could not produce the coverage report:\n'
                      '{}'.format(p.out))
    elif p.out:
        logging.info('output of "gnatcov coverage" is not empty:\n'
                     '{}'.format(p.out))
Example #18
    def run(self, cmd: List[str], role_arn: str, session_duration: int,
            **kwargs: Any) -> Run:
        """Execute a command with credentials to assume role role_arn.

        :param cmd: command to execute
        :param role_arn: ARN of the role to be used by the command
        :param session_duration: session duration in seconds or None for default
        :param kwargs: additional parameters to provide to e3.os.process.Run
        :return: Result of the call to Run for the command
        """
        credentials = self.assume_role_get_credentials(role_arn,
                                                       "aws_run_session",
                                                       session_duration,
                                                       as_env_var=True)

        if "env" not in kwargs:
            if "ignore_environ" not in kwargs:
                kwargs["ignore_environ"] = False
            kwargs["env"] = credentials
        else:
            env = dict(kwargs["env"])
            env.update(credentials)
            kwargs["env"] = env

        aws_p = Run(cmd, **kwargs)

        if aws_p.status:
            raise AWSSessionRunError(
                f"{cmd} failed (exit status: {aws_p.status})",
                origin="aws_session_cli_cmd",
                process=aws_p,
            )
        return aws_p
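
A hedged usage sketch of the run method above; 'session' stands for an instance of the enclosing class, and the role ARN and duration values are hypothetical:

p = session.run(
    ['aws', 'sts', 'get-caller-identity'],
    role_arn='arn:aws:iam::123456789012:role/Example',  # hypothetical role
    session_duration=900,
    output=None,
)
print(p.status)  # 0 here, since run() raises AWSSessionRunError on failure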
Example #19
def gprbuild(args, output='gprbuild.out', error='gprbuild.err'):
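    """Run gprbuild with the given arguments and dump its output on failure."""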
    p = Run(['gprbuild'] + args, output=output, error=error)

    if p.status != 0:
        print("Build failed:")
        print(contents_of(output))
        print(contents_of(error))
Example #20
    def run_and_check(self, argv, for_debug=False, append_output=False,
                      log_errors=True):
        """
        Run a subprocess with `argv` and check it completes with status code 0.

        In case of failure, the process output is appended to the test's
        actual output and a TestError is raised.
        """
        program = argv[0]

        p = Run(argv, cwd=self.working_dir(),
                timeout=self.timeout,
                output=PIPE,
                error=STDOUT)

        if append_output:
            self.result.out += p.out

        if p.status != 0:
            self.result.out += (
                '{} returned status code {}\n'.format(program, p.status)
            )
            self.result.out += p.out
            if log_errors:
                logging.error(p.out)
            raise TestError(
                '{} returned status code {}'.format(program, p.status)
            )

        return p.out
Example #21
def check_call(driver: TestDriver,
               cmd: List[str],
               test_name: Optional[str] = None,
               result: Optional[TestResult] = None,
               **kwargs: Any) -> Run:
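    """Run cmd, record its output and status in the test result, and abort on failure."""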
    if "cwd" not in kwargs and "working_dir" in driver.test_env:
        kwargs["cwd"] = driver.test_env["working_dir"]
    if result is None:
        result = driver.result
    if test_name is None:
        test_name = driver.test_name
    process = Run(cmd, **kwargs)
    result.processes.append({
        "output": Log(process.out),
        "status": process.status,
        "cmd": cmd,
        "run_args": kwargs,
    })

    # Append the status code and process output to the log to ease post-mortem
    # investigation.
    result.log += "Status code: {}\n".format(process.status)
    result.log += "Output:\n"
    result.log += process.out

    if process.status != 0:
        result.set_status(TestStatus.FAIL, "command call fails")
        driver.push_result(result)
        raise TestAbort
    return process
Example #22
def make_gnatcoll(work_dir, gcov=False):
    """Build gnatcoll core with or without gcov instrumentation.

    :param work_dir: working directory. gnatcoll is built in `build` subdir
        and installed in `install` subdir
    :type work_dir: str
    :param gcov: if False, build gnatcoll in PROD mode; otherwise build it
        with gcov instrumentation in DEBUG mode
    :type gcov: bool
    :return: a triplet (project path, source path, object path)
    :rtype: (str, str, str)
    :raise AssertionError: in case compilation or installation fails
    """
    logging.info('Compiling gnatcoll (gcov=%s)', gcov)

    # Create build tree structure
    build_dir = os.path.join(work_dir, 'build')
    install_dir = os.path.join(work_dir, 'install')
    mkdir(build_dir)
    mkdir(install_dir)

    # Compute make invocation
    make_gnatcoll_cmd = [
        'make', '-f',
        os.path.join(GNATCOLL_ROOT_DIR, 'Makefile'), 'ENABLE_SHARED=no'
    ]
    if gcov:
        make_gnatcoll_cmd += [
            'BUILD=DEBUG',
            'GPRBUILD_OPTIONS=-cargs -fprofile-arcs -ftest-coverage -gargs'
        ]
    else:
        make_gnatcoll_cmd += ['BUILD=PROD']

    # Build & Install
    p = Run(make_gnatcoll_cmd, cwd=build_dir, timeout=DEFAULT_TIMEOUT)
    assert p.status == 0, "gnatcoll build failed:\n%s" % p.out

    p = Run(make_gnatcoll_cmd + ['prefix=%s' % install_dir, 'install'],
            cwd=build_dir,
            timeout=DEFAULT_TIMEOUT)
    assert p.status == 0, "gnatcoll installation failed:\n%s" % p.out

    return (os.path.join(install_dir, 'share', 'gpr'),
            os.path.join(install_dir, 'include', 'gnatcoll'),
            os.path.join(build_dir, 'obj', 'gnatcoll', 'static'))
Example #23
        def s3_cp(from_path: str, s3_url: str) -> bool:
            cmd = ["s3", "cp", f"--sse={self.sse}"]
            if self.aws_profile:
                cmd.append(f"--profile={self.aws_profile}")
            cmd += [from_path, s3_url]

            s3 = Run(python_script("aws") + cmd, output=None)
            return s3.status == 0
Example #24
        def s3_cp(from_path, s3_url):
            cmd = ["s3", "cp", "--sse=%s" % self.sse]
            if self.aws_profile:
                cmd.append("--profile=%s" % self.aws_profile)
            cmd += [from_path, s3_url]

            s3 = Run(python_script("aws") + cmd, output=None)
            return s3.status == 0
Example #25
class UxAS(object):
    """An UxAS instance."""
    def __init__(self, entity_id, entity_type='Aircraft', uxas_bin=None):
        """Initialize an UxAS instance.

        :param entity_id: the entity id
        :type entity_id: str
        :param entity_type: the entity type
        :type entity_type: str
        :param uxas_bin: location of the uxas executable. If None try to find
            uxas on the path.
        :type uxas_bin: str | None
        """
        if uxas_bin is None:
            self.uxas_bin = which('uxas')
        else:
            self.uxas_bin = uxas_bin

        self.cfg_path = None
        self.cfg = UxASConfig(entity_id, entity_type)
        self.process = None

    def run(self):
        """Launch the UxAS instance.

        :return: the process object
        :rtype: e3.os.process.Run
        """
        with closing(tempfile.NamedTemporaryFile(mode='wb',
                                                 delete=False)) as fd:
            self.cfg_path = fd.name
            fd.write(self.cfg.dump())

        self.process = Run([self.uxas_bin, '-cfgPath', self.cfg_path],
                           output=None,
                           bg=True)
        return self.process

    def interrupt(self):
        """Interrupt the UxAS instance."""
        if self.process is not None and self.process.is_running():
            self.process.interrupt()

        if os.path.isfile(self.cfg_path):
            rm(self.cfg_path)
Example #26
    def run(self, previous_values):
        test_dir = self.test_env['test_dir']

        # Get the expected JSON document, or the expected error message
        expected_error = None
        try:
            expected_json = self.test_env['output']
        except KeyError:
            expected_json = None
            expected_error = self.test_env['error']
        else:
            # The object decoded from test.yaml sometimes contains strings,
            # sometimes unicode objects. Canonicalize to Unicode to get the
            # same objects as from the JSON output.
            expected_json = canonicalize_json(expected_json)

        # Read the input TOML content
        with open(os.path.join(test_dir, self.input_file), 'rb') as f:
            input_str = f.read()

        # Run the decoder with the TOML content on the standard input
        with open(os.path.join(test_dir, self.input_file), 'rb') as f:
            p = Run([os.path.join(TESTSUITE_ROOT, self.decoder_program)],
                    input=f)

        # If we expected an error, make sure we have the expected one
        if expected_error:
            if p.status == 0:
                return self.push_status(
                    'Error expected, but parsing succeeded')
            elif p.out.strip() != expected_error:
                return self.push_status('Unexpected error:\n{}'.format(p.out))
            else:
                return self.push_success()

        # Otherwise, make sure the decoder succeeded and produced the expected
        # result.
        if p.status != 0:
            return self.push_status('Decoder exited with error status ({}):'
                                    '\n{}'.format(p.status, p.out))

        try:
            p_output_json = json.loads(p.out)
        except ValueError as exc:
            return self.push_status('Cannot parse the output JSON document:'
                                    '\n{}'
                                    '\nOutput was: {}'.format(exc, p.out))

        if p_output_json != expected_json:
            p_output_pretty = pprint.pformat(p_output_json).splitlines()
            expected_pretty = pprint.pformat(expected_json).splitlines()

            self.push_for_diff('Unexpected JSON output for the decoder',
                               p_output_pretty, expected_pretty,
                               'decoder output', 'expected output')

        return self.push_success()
Example #27
def run_tool(args, output='startup-gen.out', error='startup-gen.err'):
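    """Run startup-gen with the given arguments and return its output."""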
    p = Run(['startup-gen'] + args, output=output, error=error)

    if p.status != 0:
        print("command failed:")
        print(contents_of(output))
        print(contents_of(error))

    return contents_of(output)
Example #28
def run_and_log(*args, **kwargs):
    """
    Wrapper around e3.os.process.Run to collect all processes that are run.
    """
    start = time.time()
    p = Run(*args, **kwargs)

    # Register the command for this process as well as the time it took to run
    # it.
    try:
        cmd = kwargs['cmds']
    except KeyError:
        cmd = args[0]
    p.original_cmd = cmd
    p.duration = time.time() - start
    run_processes.append(p)

    return p
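
An illustrative follow-up showing how the collected processes could be exploited afterwards; it assumes run_processes is the module-level list appended to above, and the report format is made up:

# Report each command with its duration, slowest first.
for proc in sorted(run_processes, key=lambda p: p.duration, reverse=True):
    print('%8.2fs  %s' % (proc.duration, proc.original_cmd))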
Example #29
    def test_load_config_api_1_5(self):
        sync_tree(self.spec_dir, 'new_spec_dir')
        Run(['e3-sandbox', 'migrate', '1.5', 'new_spec_dir'], output=None)
        spec_repo = AnodSpecRepository('new_spec_dir')
        spec_repo.api_version = '1.5'
        anod_class = spec_repo.load('withconfig')
        anod_instance = anod_class('', 'build')
        assert anod_instance.test1() == 9
        assert anod_instance.test_suffix() == 42
Example #30
    def test_load_config_api_1_5(self):
        sync_tree(self.spec_dir, "new_spec_dir")
        Run(["e3-sandbox", "migrate", "1.5", "new_spec_dir"], output=None)
        spec_repo = AnodSpecRepository("new_spec_dir")
        spec_repo.api_version = "1.5"
        anod_class = spec_repo.load("withconfig")
        anod_instance = anod_class("", "build")
        assert anod_instance.test1() == 9
        assert anod_instance.test_suffix() == 42