Example #1
def do_printenv(m: Main, set_prog: bool = True) -> int:
    """Print the environment for the given spec."""
    if set_prog:
        m.argument_parser.prog = m.argument_parser.prog + " printenv"
    m.argument_parser.add_argument(
        "spec_name",
        help="spec to build. This is "
        "the basename of an .anod file (without the extension)",
    )
    m.argument_parser.add_argument("--qualifier", help="optional qualifier")
    m.argument_parser.add_argument(
        "--sandbox-dir",
        help="directory in which build artifacts are stored",
        default=SBX_DIR,
    )
    m.argument_parser.add_argument(
        "--build-env",
        help="print build environment",
        action="store_true",
        default=False,
    )
    m.parse_args()

    # Disable logging messages except errors
    logging.getLogger("").setLevel(logging.ERROR)

    check_common_tools()

    ac = create_anod_context(SPEC_DIR)
    sbx = create_anod_sandbox(m.args.sandbox_dir, SPEC_DIR)

    anod_instance = ac.add_anod_action(
        name=m.args.spec_name,
        primitive="build",
        qualifier=m.args.qualifier,
        sandbox=sbx,
        upload=False,
        env=BaseEnv.from_env(),
    ).anod_instance

    # Snapshot the environment so we can detect what setenv() changed
    saved_env = dict(os.environ)

    if m.args.build_env:
        if hasattr(anod_instance, "build_setenv"):
            anod_instance.build_setenv()
    else:
        if hasattr(anod_instance, "setenv"):
            anod_instance.setenv()

    for var, value in os.environ.items():
        if var not in saved_env or saved_env[var] != value:
            print('export %s="%s";' % (var, value))

            if m.args.verbose >= 1:
                print('printf "I set %s=\\"%s\\"\\n\\n";' % (var, value))

            print(" ")

    print(BANNER % m.args.spec_name)
    return 0
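
The export lines above suggest the output is meant to be eval'd by a shell. A minimal sketch of a script entry point for this function, mirroring the do_build runner shown in Example #9; SBX_DIR, SPEC_DIR and BANNER are assumed to be constants defined by the hosting module:

if __name__ == "__main__":
    # Hypothetical runner; set_prog=False keeps the default program name.
    exit(do_printenv(Main(), set_prog=False))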
Example #2
def main():
    """Run e3-test script."""
    m = Main()

    # Ignore arguments here as they are arguments for the actual testsuite
    m.parse_args(known_args_only=True)

    # Find first the tool configuration file. Keep track of current directory
    # that will be used to select the test subset automatically.
    cwd = os.path.abspath(os.getcwd())
    root_dir = cwd
    while not os.path.isfile(os.path.join(root_dir, 'e3-test.yaml')):
        new_root_dir = os.path.dirname(root_dir)
        if new_root_dir == root_dir:
            logging.error("cannot find e3-test.yaml")
            return 1
        root_dir = new_root_dir
    config_file = os.path.join(root_dir, 'e3-test.yaml')

    with open(config_file, 'rb') as fd:
        # yaml.load() without an explicit Loader is rejected by modern PyYAML
        config = yaml.safe_load(fd)

    if 'main' not in config:
        logging.error('cannot find testsuite main')
        return 1
    p = Run([
        sys.executable,
        os.path.join(root_dir, config['main']),
        os.path.relpath(cwd, root_dir) + '/'
    ] + config.get('default_args', []),
            output=None,
            cwd=root_dir)
    return p.status
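
For reference, a hypothetical minimal e3-test.yaml that satisfies the code above: the main key is required and default_args is optional. The sketch below expresses it through yaml.safe_load so it stays runnable Python:

import yaml

# Hypothetical configuration; only the keys read by main() above are shown.
example_config = yaml.safe_load("""
main: run-tests.py      # testsuite script, relative to the root directory
default_args: [-j4]     # extra arguments always passed to the testsuite
""")
assert example_config["main"] == "run-tests.py"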
Example #3
def anod():
    """bin/anod script entry point.

    This script is installed in the sandbox.
    """
    import os
    import sys

    import e3.anod.driver
    import e3.anod.loader
    import e3.anod.sandbox
    import e3.env
    import e3.store
    import e3.store.cache

    sandbox_dir = os.path.abspath(
        os.path.join(os.path.dirname(sys.modules['__main__'].__file__),
                     os.pardir))

    sandbox = e3.anod.sandbox.SandBox()
    sandbox.root_dir = sandbox_dir

    # Load the local specs
    spec_repo = e3.anod.loader.AnodSpecRepository(sandbox.specs_dir)

    # Load the cache
    cache = e3.store.cache.load_cache('file-cache',
                                      {'cache_dir': sandbox.tmp_cache_dir})

    store = e3.store.load_store('http-simple-store', {}, cache)

    m = Main()
    subparsers = m.argument_parser.add_subparsers()
    anod_cmdline(subparsers, 'download', 'download a binary package')
    m.parse_args()

    action = m.args.action_name
    spec = m.args.spec
    qualifier = m.args.qualifier

    anod_cls = spec_repo.load(name=spec)
    anod_instance = anod_cls(qualifier=qualifier,
                             kind=action,
                             jobs=1,
                             env=e3.env.BaseEnv.from_env())

    # ??? inject the sandbox
    anod_instance.sandbox = sandbox

    driver = e3.anod.driver.AnodDriver(anod_instance=anod_instance,
                                       store=store)

    try:
        driver.activate(sandbox, spec_repo)
        driver.call(action)
    except AnodError as err:
        print(err, file=sys.stderr)
        sys.exit(1)
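
The sandbox_dir computation above assumes the script is installed one level below the sandbox root (for instance in bin/, as the docstring implies). A small sketch with a hypothetical POSIX path:

import os

script_path = "/opt/sbx/bin/anod"  # hypothetical install location
root = os.path.abspath(os.path.join(os.path.dirname(script_path), os.pardir))
assert root == "/opt/sbx"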
Example #4
def do_build(m: Main, set_prog: bool = True) -> int:
    """Perform the build."""
    if set_prog:
        m.argument_parser.prog = m.argument_parser.prog + " build"
    m.argument_parser.add_argument(
        "spec_name",
        help="spec to build. This is "
        "the basename of an .anod file (without the extension)",
    )
    m.argument_parser.add_argument("--qualifier", help="optional qualifier")
    m.argument_parser.add_argument(
        "--sandbox-dir",
        help="directory in which build artefacts are stored",
        default=SBX_DIR,
    )
    m.argument_parser.add_argument(
        "--force",
        help="force rebuild of everything",
        action="store_true",
        default=False,
    )
    m.parse_args()

    check_common_tools()

    ac = create_anod_context(SPEC_DIR)
    sbx = create_anod_sandbox(m.args.sandbox_dir, SPEC_DIR)

    sbx.create_dirs()

    ac.add_anod_action(
        name=m.args.spec_name,
        primitive="build",
        qualifier=m.args.qualifier,
        sandbox=sbx,
        upload=False,
        env=BaseEnv.from_env(),
    )
    actions = ac.schedule(resolver=ac.always_create_source_resolver)

    walker = UxasBuilder(actions, sandbox=sbx, force=m.args.force)

    # TODO: something with walker.job_status['root'], assuming we can get a
    # useful value there. Right now, it's always 'unknown'
    #
    # In the meantime, Python 3.7+ guarantees the insertion order of dict keys.
    # The job_status dict has as its penultimate entry the thing we asked to
    # build or the last thing that failed (the last non-root node). It's ugly,
    # but _should_ be safe to use this, until we have resolution for root
    # always reporting unknown.
    result: ReturnValue = list(walker.job_status.values())[-2]

    if result in BUILD_SUCCESS:
        return 0
    else:
        return result.value
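
A toy illustration, with made-up statuses, of the penultimate-entry workaround described in the comment above: dict insertion order puts the requested build just before the final root node, whose status is always unknown:

job_status = {
    "checkout.foo": "success",
    "build.foo": "failure",  # the node we actually asked to build
    "root": "unknown",       # always 'unknown', hence the [-2] workaround
}
assert list(job_status.values())[-2] == "failure"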
Example #5
def main(get_argument_parser=False):
    """Manipulate an Anod sandbox.

    This function implements the main code for the e3-sandbox entry point.
    New actions can be added through sandbox plugins. For example, to add a
    new plugin ``foo`` from a package ``e3-contrib``, derive from the class
    :class:`SandBoxAction` and register the extension by adding, in
    :file:`e3-contrib/setup.py`::

        entry_points={
            'e3.anod.sandbox.sandbox_action': [
                'foo = e3_contrib.sandbox_actions.SandBoxFoo']
        }

    :param get_argument_parser: return e3.main.Main argument_parser instead
        of running the action.
    :type get_argument_parser: bool
    """
    m = Main()
    m.parse_args(known_args_only=True)

    subparsers = m.argument_parser.add_subparsers(title="action",
                                                  description="valid actions")

    # Load all sandbox actions plugins
    ext = stevedore.ExtensionManager(
        namespace="e3.anod.sandbox.sandbox_action",
        invoke_on_load=True,
        invoke_args=(subparsers, ),
    )

    if len(ext.names()) != len(ext.entry_points_names()):
        raise SandBoxError(
            "an error occured when loading sandbox_action entry points %s" %
            ",".join(ext.entry_points_names()))  # defensive code

    if get_argument_parser:
        return m.argument_parser

    args = m.argument_parser.parse_args()

    e3.log.debug("sandbox action plugins loaded: %s", ",".join(ext.names()))

    # An action has been selected, run it
    try:
        ext[args.action].obj.run(args)
    except SandBoxError as err:
        logger.error(err)
        sys.exit(1)
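
A minimal sketch of the hypothetical SandBoxFoo plugin described in the docstring. It relies only on the contract visible in main() above: the plugin is constructed with the subparsers object (via invoke_args) and exposes a run(args) method reached as ext[args.action].obj.run(args). The real SandBoxAction base class may provide hooks not shown here:

# Exact import path assumed; the docstring only names the class.
from e3.anod.sandbox.action import SandBoxAction


class SandBoxFoo(SandBoxAction):
    """Hypothetical 'foo' sandbox action."""

    def __init__(self, subparsers):
        # Each plugin is invoked with (subparsers,) on load, see above
        parser = subparsers.add_parser("foo", help="run the foo action")
        parser.set_defaults(action="foo")

    def run(self, args):
        print("foo action executed")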
Example #6
def main(get_argument_parser=False):
    """Manipulate an Anod sandbox.

    This function implements the main code for the e3-sandbox entry point.
    New actions can be added through sandbox plugins. For example, to add a
    new plugin ``foo`` from a package ``e3-contrib``, derive from the class
    :class:`SandBoxAction` and register the extension by adding, in
    :file:`e3-contrib/setup.py`::

        entry_points={
            'e3.anod.sandbox.sandbox_action': [
                'foo = e3_contrib.sandbox_actions.SandBoxFoo']
        }

    :param get_argument_parser: return e3.main.Main argument_parser instead
        of running the action.
    :type get_argument_parser: bool
    """
    m = Main()
    subparsers = m.argument_parser.add_subparsers(
        title="action", description="valid actions")

    # Load all sandbox actions plugins
    ext = stevedore.ExtensionManager(
        namespace='e3.anod.sandbox.sandbox_action',
        invoke_on_load=True,
        invoke_args=(subparsers, ))

    if get_argument_parser:
        return m.argument_parser

    m.parse_args()

    e3.log.debug('sandbox action plugins loaded: %s',
                 ','.join(ext.names()))

    # An action has been selected, run it
    ext[m.args.action].obj.run(m.args)
Example #7
    def __cmdline_options(self):
        """Return an options object to represent the command line options"""
        main = Main(platform_args=True)
        parser = main.argument_parser
        parser.add_argument('--timeout', type=int, default=None)
        parser.add_argument('--report-file',
                            metavar='FILE',
                            help='The file in which to store the test report'
                            ' [required]')
        parser.add_argument('--qualif-level',
                            metavar='QUALIF_LEVEL',
                            help='The target qualification level when we are'
                            ' running in qualification mode.')

        parser.add_argument('--xcov-level',
                            help='Force the --level argument passed to xcov'
                            ' instead of deducing it from the test'
                            ' category when that normally happens.')

        parser.add_argument('--tags', default="")

        control.add_shared_options_to(parser, toplevel=False)

        main.parse_args()

        # "--report-file" is a required "option" which is a bit
        # self-contradictory, but it's easy to do it that way.
        exit_if(main.args.report_file is None,
                "The report file must be specified with --report-file")

        # Get our tags set as a list. Fetch contents from file if needed
        # first:
        if main.args.tags and main.args.tags.startswith('@'):
            main.args.tags = ' '.join(lines_of(main.args.tags[1:]))
        if main.args.tags:
            main.args.tags = main.args.tags.split()

        return main.args
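
A toy walk-through, with hypothetical values, of the two-step tags normalization above: an @-prefixed value names a file whose lines are joined, and the result is then split into a list:

tags = "@tags.txt"              # hypothetical command-line value
file_lines = ["linux", "prod"]  # stand-in for lines_of("tags.txt")
if tags and tags.startswith('@'):
    tags = ' '.join(file_lines)
if tags:
    tags = tags.split()
assert tags == ["linux", "prod"]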
Example #8
def main() -> None:
    """Provide entry point."""
    parser = argparse.ArgumentParser(
        description="Launch command with AWS credentials")

    parser.add_argument("--region",
                        help="AWS region to use",
                        default="eu-west-1")
    parser.add_argument("--profile",
                        help="AWS profile to use to run the command.",
                        default=None)
    parser.add_argument(
        "--role-arn",
        help="ARN of the role to assume to run the command.",
        required=True,
    )
    parser.add_argument(
        "--session_duration",
        help="session duration in seconds or None for default",
        default=None,
    )
    parser.add_argument("command")

    main_parser = Main(argument_parser=parser)
    main_parser.parse_args()
    assert main_parser.args is not None

    session = Session(profile=main_parser.args.profile,
                      regions=[main_parser.args.region])

    session.run(
        main_parser.args.command.split(),
        role_arn=main_parser.args.role_arn,
        session_duration=main_parser.args.session_duration,
        output=None,
    )
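
A hypothetical invocation for illustration only (the script name and ARN are made up). Note that the whole command is passed as one positional argument, which main() splits naively on whitespace, so command arguments containing spaces are not supported as-is:

#   python run_with_creds.py \
#       --region eu-west-1 \
#       --role-arn arn:aws:iam::123456789012:role/deploy \
#       "aws s3 ls"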
Example #9
    ac.add_anod_action(
        name=m.args.spec_name,
        primitive="build",
        qualifier=m.args.qualifier,
        sandbox=sbx,
        upload=False,
        env=BaseEnv.from_env(),
    )
    actions = ac.schedule(resolver=ac.always_create_source_resolver)

    walker = UxasBuilder(actions, sandbox=sbx, force=m.args.force)

    # TODO: something with walker.job_status['root'], assuming we can get a
    # useful value there. Right now, it's always 'unknown'
    #
    # In the meantime, Python 3.7+ guarantees the insertion order of dict keys.
    # The job_status dict has as its penultimate entry the thing we asked to
    # build or the last thing that failed (the last non-root node). It's ugly,
    # but _should_ be safe to use this, until we have resolution for root
    # always reporting unknown.
    result: ReturnValue = list(walker.job_status.values())[-2]

    if result in BUILD_SUCCESS:
        return 0
    else:
        return result.value


if __name__ == "__main__":
    exit(do_build(Main(), set_prog=False))
Example #10
    def testsuite_main(self, args: Optional[List[str]] = None) -> int:
        """Main for the main testsuite script.

        :param args: Command line arguments. If None, use `sys.argv`.
        :return: The testsuite status code (0 for success, a positive value
            for failure).
        """
        self.main = Main(platform_args=True)

        # Add common options
        parser = self.main.argument_parser

        temp_group = parser.add_argument_group(
            title="temporaries handling arguments")
        temp_group.add_argument("-t",
                                "--temp-dir",
                                metavar="DIR",
                                default=Env().tmp_dir)
        temp_group.add_argument(
            "--no-random-temp-subdir",
            dest="random_temp_subdir",
            action="store_false",
            help="Disable the creation of a random subdirectory in the"
            " temporary directory. Use this when you know that you have"
            " exclusive access to the temporary directory (needed in order to"
            " avoid name clashes there) to get a deterministic path for"
            " testsuite temporaries.")
        temp_group.add_argument(
            "-d",
            "--dev-temp",
            metavar="DIR",
            nargs="?",
            default=None,
            const="tmp",
            help="Convenience shortcut for dev setups: forces `-t DIR"
            " --no-random-temp-subdir --cleanup-mode=none` and cleans up `DIR`"
            ' first. If no directory is provided, use the local "tmp"'
            " directory.")

        cleanup_mode_map = enum_to_cmdline_args_map(CleanupMode)
        temp_group.add_argument(
            "--cleanup-mode",
            choices=list(cleanup_mode_map),
            help="Control the cleanup of working spaces.\n" +
            "\n".join(f"{name}: {CleanupMode.descriptions()[value]}"
                      for name, value in cleanup_mode_map.items()))
        temp_group.add_argument(
            "--disable-cleanup",
            action="store_true",
            help="Disable cleanup of working spaces. This option is deprecated"
            " and will disappear in a future version of e3-testsuite. Please"
            " use --cleanup-mode instead.")

        output_group = parser.add_argument_group(
            title="results output arguments")
        output_group.add_argument(
            "-o",
            "--output-dir",
            metavar="DIR",
            default="./out",
            help="Select the output directory, where test results are to be"
            " stored (default: './out'). If --old-output-dir=DIR2 is passed,"
            " the new results are stored in DIR while DIR2 contains results"
            " from a previous run. Otherwise, the new results are stored in"
            " DIR/new/ while the old ones are stored in DIR/old. In both"
            " cases, the testsuite cleans the directory for new results"
            " first.",
        )
        output_group.add_argument(
            "--old-output-dir",
            metavar="DIR",
            help="Select the old output directory, for baseline comparison."
            " See --output-dir.",
        )
        output_group.add_argument(
            "--rotate-output-dirs",
            default=False,
            action="store_true",
            help="Rotate testsuite results: move the new results directory to"
            " the old results one before running testcases (this removes the"
            " old results directory first). If not passed, we just remove the"
            " new results directory before running testcases (i.e. just ignore"
            " the old results directory).",
        )
        output_group.add_argument(
            "--show-error-output",
            "-E",
            action="store_true",
            help="When testcases fail, display their output. This is for"
            " convenience for interactive use.",
        )
        output_group.add_argument(
            "--show-time-info",
            action="store_true",
            help="Display time information for test results, if available",
        )
        output_group.add_argument(
            "--xunit-output",
            dest="xunit_output",
            metavar="FILE",
            help="Output testsuite report to the given file in the standard"
            " XUnit XML format. This is useful to display results in"
            " continuous build systems such as Jenkins.",
        )
        output_group.add_argument(
            "--gaia-output",
            action="store_true",
            help="Output a GAIA-compatible testsuite report next to the YAML"
            " report.",
        )
        output_group.add_argument(
            "--status-update-interval",
            default=1.0,
            type=float,
            help="Minimum number of seconds between status file updates. The"
            " more often we update this file, the more often one will read"
            " garbage.")

        auto_gen_default = ("enabled"
                            if self.auto_generate_text_report else "disabled")
        output_group.add_argument(
            "--generate-text-report",
            action="store_true",
            dest="generate_text_report",
            default=self.auto_generate_text_report,
            help=(
                f"When the testsuite completes, generate a 'report' text file"
                f" in the output directory ({auto_gen_default} by default)."),
        )
        output_group.add_argument(
            "--no-generate-text-report",
            action="store_false",
            dest="generate_text_report",
            help="Disable the generation of a 'report' text file (see"
            "--generate-text-report).",
        )

        output_group.add_argument(
            "--truncate-logs",
            "-T",
            metavar="N",
            type=int,
            default=200,
            help="When outputs (for instance subprocess outputs) exceed 2*N"
            " lines, only include the first and last N lines in logs. This is"
            " necessary when storage for testsuite results have size limits,"
            " and the useful information is generally either at the beginning"
            " or the end of such outputs. If 0, never truncate logs.",
        )
        output_group.add_argument(
            "--dump-environ",
            dest="dump_environ",
            action="store_true",
            default=False,
            help="Dump all environment variables in a file named environ.sh,"
            " located in the output directory (see --output-dir). This"
            " file can then be sourced from a Bourne shell to recreate"
            " the environement that existed when this testsuite was run"
            " to produce a given testsuite report.",
        )

        exec_group = parser.add_argument_group(
            title="execution control arguments")
        exec_group.add_argument(
            "--max-consecutive-failures",
            "-M",
            metavar="N",
            type=int,
            default=self.default_max_consecutive_failures,
            help="Number of test failures (FAIL or ERROR) that trigger the"
            " abortion of the testuite. If zero, this behavior is disabled. In"
            " some cases, aborting the testsuite when there are just too many"
            " failures saves time and costs: the software to test/environment"
            " is too broken, there is no point to continue running the"
            " testsuite.",
        )
        exec_group.add_argument(
            "-j",
            "--jobs",
            dest="jobs",
            type=int,
            metavar="N",
            default=Env().build.cpu.cores,
            help="Specify the number of jobs to run simultaneously",
        )
        exec_group.add_argument(
            "--failure-exit-code",
            metavar="N",
            type=int,
            default=self.default_failure_exit_code,
            help="Exit code the testsuite must use when at least one test"
            " result shows a failure/error. By default, this is"
            f" {self.default_failure_exit_code}. This option is useful when"
            " running a testsuite in a continuous integration setup, as this"
            " can make the testing process stop when there is a regression.",
        )
        exec_group.add_argument(
            "--force-multiprocessing",
            action="store_true",
            help="Force the use of subprocesses to execute tests, for"
            " debugging purposes. This is normally automatically enabled when"
            " both the level of requested parallelism is high enough (to make"
            " it profitable regarding the contention of Python's GIL) and no"
            " test fragment has dependencies on other fragments. This flag"
            " forces the use of multiprocessing even if any of these two"
            " conditions is false.")
        parser.add_argument("sublist",
                            metavar="tests",
                            nargs="*",
                            default=[],
                            help="test")
        # Add user defined options
        self.add_options(parser)

        # Parse options
        self.main.parse_args(args)
        assert self.main.args is not None

        # If there is a chance for the logging to end up in a non-tty stream,
        # disable colors. If not, be user-friendly and automatically show error
        # outputs.
        if (self.main.args.log_file or not isatty(sys.stdout)
                or not isatty(sys.stderr)):
            enable_colors = False
        else:  # interactive-only
            enable_colors = True
            self.main.args.show_error_output = True
        self.colors = ColorConfig(enable_colors)
        self.Fore = self.colors.Fore
        self.Style = self.colors.Style

        self.env = Env()
        self.env.enable_colors = enable_colors
        self.env.root_dir = self.root_dir
        self.env.test_dir = self.test_dir

        # Set up output directories and create an index for the results we are
        # going to produce.
        self.output_dir: str
        self.old_output_dir: Optional[str]
        self.setup_result_dirs()
        self.report_index = ReportIndex(self.output_dir)

        # Set the cleanup mode from command-line arguments
        if self.main.args.cleanup_mode is not None:
            self.env.cleanup_mode = (
                cleanup_mode_map[self.main.args.cleanup_mode])
        elif self.main.args.disable_cleanup:
            logger.warning(
                "--disable-cleanup is deprecated and will disappear in a"
                " future version of e3-testsuite. Please use --cleanup-mode"
                " instead.")
            self.env.cleanup_mode = CleanupMode.NONE
        else:
            self.env.cleanup_mode = CleanupMode.default()

        # Settings for temporary directory creation
        temp_dir: str = self.main.args.temp_dir
        random_temp_subdir: bool = self.main.args.random_temp_subdir

        # The "--dev-temp" option forces several settings
        if self.main.args.dev_temp:
            self.env.cleanup_mode = CleanupMode.NONE
            temp_dir = self.main.args.dev_temp
            random_temp_subdir = False

        # Now actually set up the temporary directory: make sure we start from a
        # clean directory if we use a deterministic directory.
        #
        # Note that we do make sure that working_dir is an absolute path, as we
        # are likely to be changing directories when running each test. A
        # relative path would no longer work under those circumstances.
        temp_dir = os.path.abspath(temp_dir)
        if not random_temp_subdir:
            self.working_dir = temp_dir
            rm(self.working_dir, recursive=True)
            mkdir(self.working_dir)

        elif not os.path.isdir(temp_dir):
            # If the temp dir is supposed to be randomized, we need to create a
            # subdirectory, so check that the parent directory exists first.
            logger.critical("temp dir '%s' does not exist", temp_dir)
            return 1

        else:
            self.working_dir = tempfile.mkdtemp("", "tmp", temp_dir)

        # Create the exchange directory (to exchange data between the testsuite
        # main and the subprocesses running test fragments). Compute the name
        # of the file to pass environment data to subprocesses.
        self.exchange_dir = os.path.join(self.working_dir, "exchange")
        self.env_filename = os.path.join(self.exchange_dir, "_env.bin")
        mkdir(self.exchange_dir)

        # Make them both available to test fragments
        self.env.exchange_dir = self.exchange_dir
        self.env.env_filename = self.env_filename

        self.gaia_result_files: Dict[str, GAIAResultFiles] = {}
        """Mapping from test names to files for results in the GAIA report."""

        # Store in global env: target information and common paths
        self.env.output_dir = self.output_dir
        self.env.working_dir = self.working_dir
        self.env.options = self.main.args

        # Create an object to report testsuite execution status to users
        from e3.testsuite.running_status import RunningStatus
        self.running_status = RunningStatus(
            os.path.join(self.output_dir, "status"),
            self.main.args.status_update_interval,
        )

        # User specific startup
        self.set_up()

        # Retrieve the list of tests
        self.test_list = self.get_test_list(self.main.args.sublist)

        # Create a DAG to constrain the test execution order
        dag = DAG()
        for parsed_test in self.test_list:
            self.add_test(dag, parsed_test)
        self.adjust_dag_dependencies(dag)
        dag.check()
        self.running_status.set_dag(dag)

        # Determine whether to use multiple processes for fragment execution
        # parallelism.
        self.use_multiprocessing = self.compute_use_multiprocessing()
        self.env.use_multiprocessing = self.use_multiprocessing

        # Record modules lookup path, including for the file corresponding to
        # the __main__ module.  Subprocesses will need it to have access to the
        # same modules.
        main_module = sys.modules["__main__"]
        self.env.modules_search_path = [
            os.path.dirname(os.path.abspath(main_module.__file__))
        ] + sys.path

        # Now that the env is supposed to be complete, dump it for the test
        # fragments to pick it up.
        self.env.store(self.env_filename)

        # For debugging purposes, dump the final DAG to a DOT file
        with open(os.path.join(self.output_dir, "tests.dot"), "w") as fd:
            fd.write(dag.as_dot())

        if self.use_multiprocessing:
            self.run_multiprocess_mainloop(dag)
        else:
            self.run_standard_mainloop(dag)

        self.report_index.write()
        self.dump_testsuite_result()
        if self.main.args.xunit_output:
            dump_xunit_report(self, self.main.args.xunit_output)
        if self.main.args.gaia_output:
            dump_gaia_report(self, self.output_dir, self.gaia_result_files)

        # Clean everything
        self.tear_down()

        # If requested, generate a text report
        if self.main.args.generate_text_report:
            # Use the previous testsuite results for comparison, if available
            old_index = (ReportIndex.read(self.old_output_dir)
                         if self.old_output_dir else None)

            # Include all information, except logs for successful tests, which
            # is just too verbose.
            with open(os.path.join(self.output_dir, "report"),
                      "w",
                      encoding="utf-8") as f:
                generate_report(
                    output_file=f,
                    new_index=self.report_index,
                    old_index=old_index,
                    colors=ColorConfig(colors_enabled=False),
                    show_all_logs=False,
                    show_xfail_logs=True,
                    show_error_output=True,
                    show_time_info=True,
                )

        # Return the appropriate status code: 1 when there is a framework
        # issue, the failure status code from the --failure-exit-code=N option
        # when there is at least one testcase failure, or 0.
        statuses = {
            s
            for s, count in self.report_index.status_counters.items() if count
        }
        if TestStatus.FAIL in statuses or TestStatus.ERROR in statuses:
            return self.main.args.failure_exit_code
        else:
            return 0
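
A minimal sketch of how testsuite_main is typically reached, with hypothetical class and directory names: subclass the public Testsuite class built on this core, point it at a tests subdirectory, and call testsuite_main() from the script entry point:

import sys

from e3.testsuite import Testsuite


class MyTestsuite(Testsuite):
    tests_subdir = "tests"  # used to compute self.test_dir (see the core)


if __name__ == "__main__":
    sys.exit(MyTestsuite().testsuite_main())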
Example #11
class TestsuiteCore:
    """Testsuite Core driver.

    This class is the base of the Testsuite class and should not be
    instantiated directly. It is not recommended to override any of the
    functions declared in it.

    See documentation of Testsuite class for overridable methods and
    variables.
    """
    def __init__(
        self,
        root_dir: Optional[str] = None,
        testsuite_name: str = "Untitled testsute",
    ) -> None:
        """Testsuite constructor.

        :param root_dir: Root directory for the testsuite. If None, use the
            directory containing the Python module that defines self's class.
        :param testsuite_name: Name for this testsuite. It can be used to
            provide a title in some report formats.
        """
        if root_dir is None:
            root_dir = os.path.dirname(inspect.getfile(type(self)))
        self.root_dir = os.path.abspath(root_dir)
        self.test_dir = os.path.join(self.root_dir, self.tests_subdir)
        logger.debug("Test directory: %s", self.test_dir)
        self.consecutive_failures = 0
        self.return_values: Dict[str, Any] = {}
        self.result_tracebacks: Dict[str, List[str]] = {}
        self.testsuite_name = testsuite_name

        self.aborted_too_many_failures = False
        """
        Whether the testsuite aborted because of too many consecutive test
        failures (see the --max-consecutive-failures command-line option).
        """

        self.use_multiprocessing = False
        """Whether to use multi-processing for tests parallelism.

        Beyond a certain level of parallelism, Python's GIL contention is too
        high to benefit from more processors. When we reach this level, it is
        more interesting to use multiple processes to cancel the GIL
        contention.

        The actual value for this attribute is computed once the DAG is built,
        in the "compute_use_multiprocessing" method.
        """

    # Mypy does not support decorators on properties, so keep the actual
    # implementations for deprecated properties in methods.

    @deprecated(2)
    def _test_counter(self) -> int:
        return len(self.report_index.entries)

    @deprecated(2)
    def _test_status_counters(self) -> Dict[TestStatus, int]:
        return self.report_index.status_counters

    @deprecated(2)
    def _results(self) -> Dict[str, TestStatus]:
        return {
            e.test_name: e.status
            for e in self.report_index.entries.values()
        }

    @property
    def test_counter(self) -> int:
        """Return the number of test results in the report.

        Warning: this method is obsolete and will be removed in the future.
        """
        return self._test_counter()

    @property
    def test_status_counters(self) -> Dict[TestStatus, int]:
        """Return test result counts per test status.

        Warning: this method is obsolete and will be removed in the future.
        """
        return self._test_status_counters()

    @property
    def results(self) -> Dict[str, TestStatus]:
        """Return a mapping from test names to results.

        Warning: this method is obsolete and will be removed in the future.
        """
        return self._results()

    def compute_use_multiprocessing(self) -> bool:
        """Return whether to use multi-processing for tests parallelism.

        See docstring for the "use_multiprocessing" attribute. Subclasses are
        free to override this to take control of when multiprocessing is
        enabled. Note that this will disregard the "--force-multiprocessing"
        command line option.
        """
        raise NotImplementedError

    def testsuite_main(self, args: Optional[List[str]] = None) -> int:
        """Main for the main testsuite script.

        :param args: Command line arguments. If None, use `sys.argv`.
        :return: The testsuite status code (0 for success, a positive value
            for failure).
        """
        self.main = Main(platform_args=True)

        # Add common options
        parser = self.main.argument_parser

        temp_group = parser.add_argument_group(
            title="temporaries handling arguments")
        temp_group.add_argument("-t",
                                "--temp-dir",
                                metavar="DIR",
                                default=Env().tmp_dir)
        temp_group.add_argument(
            "--no-random-temp-subdir",
            dest="random_temp_subdir",
            action="store_false",
            help="Disable the creation of a random subdirectory in the"
            " temporary directory. Use this when you know that you have"
            " exclusive access to the temporary directory (needed in order to"
            " avoid name clashes there) to get a deterministic path for"
            " testsuite temporaries.")
        temp_group.add_argument(
            "-d",
            "--dev-temp",
            metavar="DIR",
            nargs="?",
            default=None,
            const="tmp",
            help="Convenience shortcut for dev setups: forces `-t DIR"
            " --no-random-temp-subdir --cleanup-mode=none` and cleans up `DIR`"
            ' first. If no directory is provided, use the local "tmp"'
            " directory.")

        cleanup_mode_map = enum_to_cmdline_args_map(CleanupMode)
        temp_group.add_argument(
            "--cleanup-mode",
            choices=list(cleanup_mode_map),
            help="Control the cleanup of working spaces.\n" +
            "\n".join(f"{name}: {CleanupMode.descriptions()[value]}"
                      for name, value in cleanup_mode_map.items()))
        temp_group.add_argument(
            "--disable-cleanup",
            action="store_true",
            help="Disable cleanup of working spaces. This option is deprecated"
            " and will disappear in a future version of e3-testsuite. Please"
            " use --cleanup-mode instead.")

        output_group = parser.add_argument_group(
            title="results output arguments")
        output_group.add_argument(
            "-o",
            "--output-dir",
            metavar="DIR",
            default="./out",
            help="Select the output directory, where test results are to be"
            " stored (default: './out'). If --old-output-dir=DIR2 is passed,"
            " the new results are stored in DIR while DIR2 contains results"
            " from a previous run. Otherwise, the new results are stored in"
            " DIR/new/ while the old ones are stored in DIR/old. In both"
            " cases, the testsuite cleans the directory for new results"
            " first.",
        )
        output_group.add_argument(
            "--old-output-dir",
            metavar="DIR",
            help="Select the old output directory, for baseline comparison."
            " See --output-dir.",
        )
        output_group.add_argument(
            "--rotate-output-dirs",
            default=False,
            action="store_true",
            help="Rotate testsuite results: move the new results directory to"
            " the old results one before running testcases (this removes the"
            " old results directory first). If not passed, we just remove the"
            " new results directory before running testcases (i.e. just ignore"
            " the old results directory).",
        )
        output_group.add_argument(
            "--show-error-output",
            "-E",
            action="store_true",
            help="When testcases fail, display their output. This is for"
            " convenience for interactive use.",
        )
        output_group.add_argument(
            "--show-time-info",
            action="store_true",
            help="Display time information for test results, if available",
        )
        output_group.add_argument(
            "--xunit-output",
            dest="xunit_output",
            metavar="FILE",
            help="Output testsuite report to the given file in the standard"
            " XUnit XML format. This is useful to display results in"
            " continuous build systems such as Jenkins.",
        )
        output_group.add_argument(
            "--gaia-output",
            action="store_true",
            help="Output a GAIA-compatible testsuite report next to the YAML"
            " report.",
        )
        output_group.add_argument(
            "--status-update-interval",
            default=1.0,
            type=float,
            help="Minimum number of seconds between status file updates. The"
            " more often we update this file, the more often one will read"
            " garbage.")

        auto_gen_default = ("enabled"
                            if self.auto_generate_text_report else "disabled")
        output_group.add_argument(
            "--generate-text-report",
            action="store_true",
            dest="generate_text_report",
            default=self.auto_generate_text_report,
            help=(
                f"When the testsuite completes, generate a 'report' text file"
                f" in the output directory ({auto_gen_default} by default)."),
        )
        output_group.add_argument(
            "--no-generate-text-report",
            action="store_false",
            dest="generate_text_report",
            help="Disable the generation of a 'report' text file (see"
            "--generate-text-report).",
        )

        output_group.add_argument(
            "--truncate-logs",
            "-T",
            metavar="N",
            type=int,
            default=200,
            help="When outputs (for instance subprocess outputs) exceed 2*N"
            " lines, only include the first and last N lines in logs. This is"
            " necessary when storage for testsuite results have size limits,"
            " and the useful information is generally either at the beginning"
            " or the end of such outputs. If 0, never truncate logs.",
        )
        output_group.add_argument(
            "--dump-environ",
            dest="dump_environ",
            action="store_true",
            default=False,
            help="Dump all environment variables in a file named environ.sh,"
            " located in the output directory (see --output-dir). This"
            " file can then be sourced from a Bourne shell to recreate"
            " the environement that existed when this testsuite was run"
            " to produce a given testsuite report.",
        )

        exec_group = parser.add_argument_group(
            title="execution control arguments")
        exec_group.add_argument(
            "--max-consecutive-failures",
            "-M",
            metavar="N",
            type=int,
            default=self.default_max_consecutive_failures,
            help="Number of test failures (FAIL or ERROR) that trigger the"
            " abortion of the testuite. If zero, this behavior is disabled. In"
            " some cases, aborting the testsuite when there are just too many"
            " failures saves time and costs: the software to test/environment"
            " is too broken, there is no point to continue running the"
            " testsuite.",
        )
        exec_group.add_argument(
            "-j",
            "--jobs",
            dest="jobs",
            type=int,
            metavar="N",
            default=Env().build.cpu.cores,
            help="Specify the number of jobs to run simultaneously",
        )
        exec_group.add_argument(
            "--failure-exit-code",
            metavar="N",
            type=int,
            default=self.default_failure_exit_code,
            help="Exit code the testsuite must use when at least one test"
            " result shows a failure/error. By default, this is"
            f" {self.default_failure_exit_code}. This option is useful when"
            " running a testsuite in a continuous integration setup, as this"
            " can make the testing process stop when there is a regression.",
        )
        exec_group.add_argument(
            "--force-multiprocessing",
            action="store_true",
            help="Force the use of subprocesses to execute tests, for"
            " debugging purposes. This is normally automatically enabled when"
            " both the level of requested parallelism is high enough (to make"
            " it profitable regarding the contention of Python's GIL) and no"
            " test fragment has dependencies on other fragments. This flag"
            " forces the use of multiprocessing even if any of these two"
            " conditions is false.")
        parser.add_argument("sublist",
                            metavar="tests",
                            nargs="*",
                            default=[],
                            help="test")
        # Add user defined options
        self.add_options(parser)

        # Parse options
        self.main.parse_args(args)
        assert self.main.args is not None

        # If there is a chance for the logging to end up in a non-tty stream,
        # disable colors. If not, be user-friendly and automatically show error
        # outputs.
        if (self.main.args.log_file or not isatty(sys.stdout)
                or not isatty(sys.stderr)):
            enable_colors = False
        else:  # interactive-only
            enable_colors = True
            self.main.args.show_error_output = True
        self.colors = ColorConfig(enable_colors)
        self.Fore = self.colors.Fore
        self.Style = self.colors.Style

        self.env = Env()
        self.env.enable_colors = enable_colors
        self.env.root_dir = self.root_dir
        self.env.test_dir = self.test_dir

        # Set up output directories and create an index for the results we are
        # going to produce.
        self.output_dir: str
        self.old_output_dir: Optional[str]
        self.setup_result_dirs()
        self.report_index = ReportIndex(self.output_dir)

        # Set the cleanup mode from command-line arguments
        if self.main.args.cleanup_mode is not None:
            self.env.cleanup_mode = (
                cleanup_mode_map[self.main.args.cleanup_mode])
        elif self.main.args.disable_cleanup:
            logger.warning(
                "--disable-cleanup is deprecated and will disappear in a"
                " future version of e3-testsuite. Please use --cleanup-mode"
                " instead.")
            self.env.cleanup_mode = CleanupMode.NONE
        else:
            self.env.cleanup_mode = CleanupMode.default()

        # Settings for temporary directory creation
        temp_dir: str = self.main.args.temp_dir
        random_temp_subdir: bool = self.main.args.random_temp_subdir

        # The "--dev-temp" option forces several settings
        if self.main.args.dev_temp:
            self.env.cleanup_mode = CleanupMode.NONE
            temp_dir = self.main.args.dev_temp
            random_temp_subdir = False

        # Now actually set up the temporary directory: make sure we start from a
        # clean directory if we use a deterministic directory.
        #
        # Note that we do make sure that working_dir is an absolute path, as we
        # are likely to be changing directories when running each test. A
        # relative path would no longer work under those circumstances.
        temp_dir = os.path.abspath(temp_dir)
        if not random_temp_subdir:
            self.working_dir = temp_dir
            rm(self.working_dir, recursive=True)
            mkdir(self.working_dir)

        elif not os.path.isdir(temp_dir):
            # If the temp dir is supposed to be randomized, we need to create a
            # subdirectory, so check that the parent directory exists first.
            logger.critical("temp dir '%s' does not exist", temp_dir)
            return 1

        else:
            self.working_dir = tempfile.mkdtemp("", "tmp", temp_dir)

        # Create the exchange directory (to exchange data between the testsuite
        # main and the subprocesses running test fragments). Compute the name
        # of the file to pass environment data to subprocesses.
        self.exchange_dir = os.path.join(self.working_dir, "exchange")
        self.env_filename = os.path.join(self.exchange_dir, "_env.bin")
        mkdir(self.exchange_dir)

        # Make them both available to test fragments
        self.env.exchange_dir = self.exchange_dir
        self.env.env_filename = self.env_filename

        self.gaia_result_files: Dict[str, GAIAResultFiles] = {}
        """Mapping from test names to files for results in the GAIA report."""

        # Store in global env: target information and common paths
        self.env.output_dir = self.output_dir
        self.env.working_dir = self.working_dir
        self.env.options = self.main.args

        # Create an object to report testsuite execution status to users
        from e3.testsuite.running_status import RunningStatus
        self.running_status = RunningStatus(
            os.path.join(self.output_dir, "status"),
            self.main.args.status_update_interval,
        )

        # User specific startup
        self.set_up()

        # Retrieve the list of tests
        self.test_list = self.get_test_list(self.main.args.sublist)

        # Create a DAG to constrain the test execution order
        dag = DAG()
        for parsed_test in self.test_list:
            self.add_test(dag, parsed_test)
        self.adjust_dag_dependencies(dag)
        dag.check()
        self.running_status.set_dag(dag)

        # Determine whether to use multiple processes for fragment execution
        # parallelism.
        self.use_multiprocessing = self.compute_use_multiprocessing()
        self.env.use_multiprocessing = self.use_multiprocessing

        # Record modules lookup path, including for the file corresponding to
        # the __main__ module.  Subprocesses will need it to have access to the
        # same modules.
        main_module = sys.modules["__main__"]
        self.env.modules_search_path = [
            os.path.dirname(os.path.abspath(main_module.__file__))
        ] + sys.path

        # Now that the env is supposed to be complete, dump it for the test
        # fragments to pick it up.
        self.env.store(self.env_filename)

        # For debugging purposes, dump the final DAG to a DOT file
        with open(os.path.join(self.output_dir, "tests.dot"), "w") as fd:
            fd.write(dag.as_dot())

        if self.use_multiprocessing:
            self.run_multiprocess_mainloop(dag)
        else:
            self.run_standard_mainloop(dag)

        self.report_index.write()
        self.dump_testsuite_result()
        if self.main.args.xunit_output:
            dump_xunit_report(self, self.main.args.xunit_output)
        if self.main.args.gaia_output:
            dump_gaia_report(self, self.output_dir, self.gaia_result_files)

        # Clean everything
        self.tear_down()

        # If requested, generate a text report
        if self.main.args.generate_text_report:
            # Use the previous testsuite results for comparison, if available
            old_index = (ReportIndex.read(self.old_output_dir)
                         if self.old_output_dir else None)

            # Include all information, except logs for successful tests, which
            # is just too verbose.
            with open(os.path.join(self.output_dir, "report"),
                      "w",
                      encoding="utf-8") as f:
                generate_report(
                    output_file=f,
                    new_index=self.report_index,
                    old_index=old_index,
                    colors=ColorConfig(colors_enabled=False),
                    show_all_logs=False,
                    show_xfail_logs=True,
                    show_error_output=True,
                    show_time_info=True,
                )

        # Return the appropriate status code: 1 when there is a framework
        # issue, the failure status code from the --failure-exit-code=N option
        # when there is at least one testcase failure, or 0.
        statuses = {
            s
            for s, count in self.report_index.status_counters.items() if count
        }
        if TestStatus.FAIL in statuses or TestStatus.ERROR in statuses:
            return self.main.args.failure_exit_code
        else:
            return 0

    def get_test_list(self, sublist: List[str]) -> List[ParsedTest]:
        """Retrieve the list of tests.

        :param sublist: A list of test scenarios or patterns.
        """
        # Use a mapping: test name -> ParsedTest when building the result, as
        # several patterns in "sublist" may yield the same testcase.
        testcases: Dict[str, ParsedTest] = {}
        test_finders = self.test_finders
        dedicated_dirs_only = all(tf.test_dedicated_directory
                                  for tf in test_finders)

        def matches_pattern(pattern: Optional[Pattern[str]],
                            name: str) -> bool:
            return pattern is None or bool(pattern.search(name))

        def add_testcase(pattern: Optional[Pattern[str]],
                         test: ParsedTest) -> None:

            # Do not add this testcase if its test-specific matcher does not
            # match the requested pattern.
            if test.test_matcher and not matches_pattern(
                    pattern, test.test_matcher):
                return

            if test.test_name in testcases:
                self.add_test_error(
                    test_name=test.test_name,
                    message=f"duplicate test name: {test.test_name}",
                    tb=None,
                )
            else:
                testcases[test.test_name] = test

        def helper(spec: str) -> None:
            pattern: Optional[Pattern[str]] = None

            # If the given pattern is a directory, do not go through the whole
            # tests subdirectory.
            if os.path.isdir(spec):
                root = spec
            else:
                root = self.test_dir
                try:
                    pattern = re.compile(spec)
                except re.error as exc:
                    logger.debug(
                        "Test pattern is not a valid regexp, try to match it"
                        " as-is: {}".format(exc))
                    pattern = re.compile(re.escape(spec))

            # For each directory in the requested subdir, ask our test finders
            # to probe for a testcase. Register matches.
            for dirpath, dirnames, filenames in os.walk(root,
                                                        followlinks=True):
                # If all tests are guaranteed to have a dedicated directory,
                # do not process directories that don't match the requested
                # pattern.
                if dedicated_dirs_only and not matches_pattern(
                        pattern, dirpath):
                    continue

                # The first test finder that has a match "wins". When handling
                # test data, we want to deal only with absolute paths, so get
                # the absolute name now.
                dirpath = os.path.abspath(dirpath)
                for tf in test_finders:
                    try:
                        test_or_list = tf.probe(self, dirpath, dirnames,
                                                filenames)
                    except ProbingError as exc:
                        self.add_test_error(
                            test_name=self.test_name(dirpath),
                            message=str(exc),
                            tb=traceback.format_exc(),
                        )
                        break
                    if isinstance(test_or_list, list):
                        for t in test_or_list:
                            add_testcase(pattern, t)
                        break
                    elif test_or_list is not None:
                        add_testcase(pattern, test_or_list)
                        break

        # If specific tests are requested, only look for them. Otherwise, just
        # look in the tests subdirectory.
        if sublist:
            for s in sublist:
                helper(s)
        else:
            helper(self.test_dir)

        result = list(testcases.values())
        logger.info("Found {} tests".format(len(result)))
        logger.debug("tests:\n  " + "\n  ".join(t.test_dir for t in result))
        return result
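
    # Note on helper() above: an invalid regular expression in "sublist" is
    # not an error. For a hypothetical spec such as "test[1", re.compile()
    # raises re.error and the fallback re.compile(re.escape("test[1")) then
    # matches the string literally anywhere in a test name.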

    def add_test(self, dag: DAG, parsed_test: ParsedTest) -> None:
        """Register a test to run.

        :param dag: The DAG of test fragments to execute for the testsuite.
        :param parsed_test: Test to instantiate.
        """
        test_name = parsed_test.test_name

        # Complete the test environment
        test_env = dict(parsed_test.test_env)
        test_env["test_dir"] = parsed_test.test_dir
        test_env["test_name"] = test_name

        assert isinstance(self.env.working_dir, str)
        test_env["working_dir"] = os.path.join(self.env.working_dir, test_name)

        # Fetch the test driver to use
        driver = parsed_test.driver_cls
        if not driver:
            if self.default_driver:
                driver = self.test_driver_map[self.default_driver]
            else:
                self.add_test_error(
                    test_name=test_name,
                    message="missing test driver",
                )
                return

        # Finally run the driver instantiation
        try:
            instance = driver(self.env, test_env)
            instance.Fore = self.Fore
            instance.Style = self.Style
            instance.add_test(dag)

        except Exception as e:
            self.add_test_error(
                test_name=test_name,
                message=str(e),
                tb=traceback.format_exc(),
            )

    def dump_testsuite_result(self) -> None:
        """Log a summary of test results.

        Subclasses are free to override this to do whatever is suitable for
        them.
        """
        lines = ["Summary:"]

        # Display test count for each status, but only for status that have
        # at least one test. Sort them by status value, to get consistent
        # order.
        def sort_key(couple: Tuple[TestStatus, int]) -> Any:
            status, _ = couple
            return status.value

        stats = sorted(
            ((status, count)
             for status, count in self.report_index.status_counters.items()
             if count),
            key=sort_key,
        )
        for status, count in stats:
            lines.append("  {}{: <12}{} {}".format(
                status.color(self.colors),
                status.name,
                self.Style.RESET_ALL,
                count,
            ))
        if not stats:
            lines.append("  <no test result>")
        logger.info("\n".join(lines))

        # Dump the comment file
        with open(os.path.join(self.output_dir, "comment"), "w") as f:
            self.write_comment_file(f)

    def collect_result(self, fragment: TestFragment) -> None:
        """Import test results from ``fragment`` into testsuite reports.

        :param fragment: Test fragment (just completed) from which to import
            test results.
        """
        assert self.main.args

        # Keep track of the number of consecutive failures seen so far: if it
        # reaches the maximum number allowed, we must abort the testsuite.
        max_consecutive_failures = self.main.args.max_consecutive_failures

        while fragment.result_queue:
            item = fragment.result_queue.pop()

            self.add_result(item)

            # Update the number of consecutive failures, aborting the testsuite
            # if appropriate
            if item.result.status in (TestStatus.ERROR, TestStatus.FAIL):
                self.consecutive_failures += 1
                if (max_consecutive_failures > 0 and
                        self.consecutive_failures >= max_consecutive_failures):
                    self.aborted_too_many_failures = True
                    logger.error(
                        "Too many consecutive failures, aborting the testsuite"
                    )
                    raise KeyboardInterrupt
            else:
                self.consecutive_failures = 0

    def add_result(self, item: ResultQueueItem) -> None:
        """Add a test result to the result index and log it.

        :param item: Result queue item for the result to add.
        """
        assert self.main.args

        status = item.result.status
        test_name = item.result.test_name

        # The test results that reach this point are special: they were
        # serialized/deserialized through YAML, so the Log layer disappeared.
        assert status is not None

        # Ensure that we don't have two results with the same test name

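        # Lines from traceback.format_stack already end with a newline, hence
        # the plain "".join below.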
        def indented_tb(tb: List[str]) -> str:
            return "".join("  {}".format(line) for line in tb)

        assert test_name not in self.report_index.entries, (
            f"cannot push twice results for {test_name}"
            f"\nFirst push happened at:"
            f"\n{indented_tb(self.result_tracebacks[test_name])}"
            f"\nThis one happened at:"
            f"\n{indented_tb(item.traceback)}")

        # Now that the result is validated, add it to our internals
        self.report_index.add_result(item.result, item.filename)
        self.result_tracebacks[test_name] = item.traceback
        self.running_status.set_status_counters(
            self.report_index.status_counters)
        if item.gaia_results:
            self.gaia_result_files[test_name] = item.gaia_results

        # Log the test result. If error output is requested and the test
        # failed unexpectedly, show the detailed logs.
        log_line = summary_line(item.result, self.colors,
                                self.main.args.show_time_info)
        if self.main.args.show_error_output and status not in (
                TestStatus.PASS,
                TestStatus.XFAIL,
                TestStatus.XPASS,
                TestStatus.SKIP,
        ):
            full_result = self.report_index.entries[test_name].load()

            def format_log(log: Log) -> str:
                return "\n" + str(log) + self.Style.RESET_ALL

            if full_result.diff:
                log_line += format_log(full_result.diff)
            else:
                log_line += format_log(full_result.log)
        logger.info(log_line)

    def add_test_error(self,
                       test_name: str,
                       message: str,
                       tb: Optional[str] = None) -> None:
        """Create and add an ERROR test status.

        :param test_name: Prefix for the test result to create. This adds a
            suffix to avoid clashes.
        :param message: Error message.
        :param tb: Optional traceback for the error.
        """
        from e3.testsuite.driver import ResultQueueItem

        result = TestResult(
            f"{test_name}__except{len(self.report_index.entries)}",
            env={},
            status=TestStatus.ERROR,
            msg=message,
        )
        if tb:
            result.log += tb

        self.add_result(
            ResultQueueItem(
                result.summary,
                result.save(self.output_dir),
                traceback.format_stack(),
                dump_result_logs_if_needed(self.env, result, self.output_dir),
            ))

    def setup_result_dirs(self) -> None:
        """Create the output directory in which the results are stored."""
        assert self.main.args
        args = self.main.args

        # The actual new/old directories to use depend on both the
        # --output-dir and --old-output-dir options.
        d = os.path.abspath(args.output_dir)
        if args.old_output_dir:
            self.output_dir = d
            old_output_dir = os.path.abspath(args.old_output_dir)
        else:
            self.output_dir = os.path.join(d, "new")
            old_output_dir = os.path.join(d, "old")

        # Rotate results directories if requested. In both cases, make sure the
        # new results dir is clean.
        if args.rotate_output_dirs:
            if os.path.isdir(old_output_dir):
                rm(old_output_dir, recursive=True)
            if os.path.isdir(self.output_dir):
                mv(self.output_dir, old_output_dir)
        elif os.path.isdir(self.output_dir):
            rm(self.output_dir, recursive=True)
        mkdir(self.output_dir)

        # Remember the old output directory only if it exists and does
        # contain results. If not, this info will be unused at best, or lead to
        # incorrect behavior.
        self.old_output_dir = None
        if (os.path.exists(old_output_dir) and os.path.exists(
                os.path.join(old_output_dir, ReportIndex.INDEX_FILENAME))):
            self.old_output_dir = old_output_dir

        if args.dump_environ:
            with open(os.path.join(self.output_dir, "environ.sh"), "w") as f:
                for var_name in sorted(os.environ):
                    f.write("export {}={}\n".format(
                        var_name, quote_arg(os.environ[var_name])))

    def run_standard_mainloop(self, dag: DAG) -> None:
        """Run the main loop to execute test fragments in threads."""
        assert self.main.args is not None

        from e3.job import Job
        from e3.testsuite.fragment import FragmentData, ThreadTestFragment

        def job_factory(
            uid: str,
            data: Any,
            predecessors: FrozenSet[str],
            notify_end: Callable[[str], None],
        ) -> ThreadTestFragment:
            """Turn a DAG item into a ThreadTestFragment instance."""
            assert isinstance(data, FragmentData)

            # When passing return values from predecessors, remove the current
            # test name from the keys to ease referencing by users (the short
            # fragment name can then be used without knowing the full node
            # id).
            key_prefix = data.driver.test_name + "."
            key_prefix_len = len(key_prefix)

            def filter_key(k: str) -> str:
                if k.startswith(key_prefix):
                    return k[key_prefix_len:]
                else:
                    return k

            return ThreadTestFragment(
                uid,
                data.driver,
                data.callback,
                {filter_key(k): self.return_values[k]
                 for k in predecessors},
                notify_end,
                self.running_status,
            )

        def collect_result(job: Job) -> bool:
            """Collect test results from the given fragment."""
            assert isinstance(job, ThreadTestFragment)
            self.return_values[job.uid] = job.return_value
            self.collect_result(job)

            # In the e3.job.scheduler API, collect returning "True" means
            # "requeue the job". We never want to do that.
            return False

        # Create a scheduler to run all fragments for the testsuite main loop
        scheduler = Scheduler(
            job_provider=job_factory,
            tokens=self.main.args.jobs,
            collect=collect_result,
        )

        # Run the tests. Note that when the testsuite aborts because of too
        # many consecutive test failures, we still want to produce a report and
        # exit through regular ways, so catch the KeyboardInterrupt exceptions
        # that e3's scheduler uses to abort the execution loop, but only in
        # such cases. In other words, let the exception propagate if it's the
        # user that interrupted the testsuite.
        try:
            scheduler.run(dag)
        except KeyboardInterrupt:
            if not self.aborted_too_many_failures:  # interactive-only
                raise

    def run_multiprocess_mainloop(self, dag: DAG) -> None:
        """Run the main loop to execute test fragments in subprocesses."""
        assert self.main.args is not None

        from e3.testsuite.fragment import FragmentData, ProcessTestFragment
        from e3.testsuite.multiprocess_scheduler import MultiprocessScheduler

        def job_factory(uid: str, data: FragmentData,
                        slot: int) -> ProcessTestFragment:
            """Turn a DAG item into a ProcessTestFragment instance."""
            assert data.callback_by_name
            return ProcessTestFragment(
                uid,
                data.driver,
                data.name,
                slot,
                self.running_status,
                self.env,
            )

        def collect_result(job: ProcessTestFragment) -> None:
            """Collect test results from the given fragment."""
            job.collect_result()
            self.collect_result(job)

        scheduler: MultiprocessScheduler[FragmentData, ProcessTestFragment] = (
            MultiprocessScheduler(dag,
                                  job_factory,
                                  collect_result,
                                  jobs=self.main.args.jobs))

        # See corresponding code/comment in run_standard_mainloop
        try:
            scheduler.run()
        except KeyboardInterrupt:
            if not self.aborted_too_many_failures:  # interactive-only
                raise

    # Unlike the previous methods, the following ones are supposed to be
    # overridden.

    @property
    def tests_subdir(self) -> str:
        """
        Return the subdirectory in which tests are looked for.

        The returned directory name is considered relative to the root
        testsuite directory (self.root_dir).
        """
        raise NotImplementedError

    @property
    def test_driver_map(self) -> Dict[str, Type[TestDriver]]:
        """Return a map from test driver names to TestDriver subclasses.

        Test finders will be able to use this map to fetch the test drivers
        referenced in testcases.
        """
        raise NotImplementedError

    @property
    def default_driver(self) -> Optional[str]:
        """Return the name of the default driver for testcases.

        When tests do not query a specific driver, the one associated to this
        name is used instead. If this property returns None, all tests are
        required to query a driver.
        """
        raise NotImplementedError

    def test_name(self, test_dir: str) -> str:
        """Compute the test name given a testcase spec.

        This function can be overridden. By default it uses the name of the
        test directory. Note that the test name should be a valid filename (no
        directory separators, nor special characters such as ``:``, ...).
        """
        raise NotImplementedError

    @property
    def test_finders(self) -> List[TestFinder]:
        """Return test finders to probe tests directories."""
        raise NotImplementedError

    def add_options(self, parser: argparse.ArgumentParser) -> None:
        """Add testsuite specific switches.

        Subclasses can override this method to add their own testsuite
        command-line options.

        :param parser: Parser for command-line arguments. See
            <https://docs.python.org/3/library/argparse.html> for usage.
        """
        raise NotImplementedError

    def set_up(self) -> None:
        """Execute operations before running the testsuite.

        Before running this, command-line arguments were parsed. After this
        returns, the testsuite will look for testcases.

        By default, this does nothing. Overriding this method allows testsuites
        to prepare the execution of the testsuite depending on their needs. For
        instance:

        * process testsuite-specific options;
        * initialize environment variables;
        * adjust self.env (object forwarded to test drivers).
        """
        raise NotImplementedError

    def tear_down(self) -> None:
        """Execute operation when finalizing the testsuite.

        By default, this cleans the working (temporary) directory in which the
        tests were run.
        """
        raise NotImplementedError

    def write_comment_file(self, comment_file: IO[str]) -> None:
        """Write the comment file's content.

        :param comment_file: File descriptor for the comment file.  Overriding
            methods should only call its "write" method (or print to it).
        """
        raise NotImplementedError

    @property
    def default_max_consecutive_failures(self) -> int:
        """Return the default maximum number of consecutive failures.

        In some cases, aborting the testsuite when there are just too many
        failures saves time and costs: the software to test/environment is too
        broken, there is no point in continuing to run the testsuite.

        This property must return the number of test failures (FAIL or ERROR)
        that trigger the abortion of the testsuite. If zero, this behavior is
        disabled.
        """
        raise NotImplementedError

    @property
    def default_failure_exit_code(self) -> int:
        """Return the default exit code when at least one test fails."""
        raise NotImplementedError

    @property
    def auto_generate_text_report(self) -> bool:
        """Return whether to automatically generate a text report.

        This is disabled by default (and controlled by the
        --generate-text-report command-line option) because the generation of
        this report can add non-trivial overhead depending on results.
        """
        raise NotImplementedError

    def adjust_dag_dependencies(self, dag: DAG) -> None:
        """Adjust dependencies in the DAG of all test fragments.

        :param dag: DAG to adjust.
        """
        raise NotImplementedError

    @property
    def multiprocessing_supported(self) -> bool:
        """Return whether running test fragments in subprocesses is supported.

        When multiprocessing is enabled (see the "use_multiprocessing"
        attribute), test fragments are executed in a separate process, and the
        propagation of their return values is disabled (FragmentData's
        "previous_values" argument is always an empty dict).

        This means that multiprocessing can work only if test drivers and all
        code used by test fragments can be imported by subprocesses (for
        instance, classes defined in the testsuite entry point are unavailable)
        and if test drivers don't use the "previous_values" mechanism.

        Testsuite authors can use the "--force-multiprocessing" testsuite
        option to check if this works.
        """
        raise NotImplementedError
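
For reference, here is a minimal sketch of a concrete testsuite built on these hooks, assuming the e3-testsuite API (Testsuite, ClassicTestDriver and its shell() helper); ShellDriver, the "cmd" key and the "tests" directory are illustrative names, not part of the code above:

import sys

from e3.testsuite import Testsuite
from e3.testsuite.driver.classic import ClassicTestDriver


class ShellDriver(ClassicTestDriver):
    """Hypothetical driver: run the "cmd" list from the testcase's test.yaml."""

    def run(self):
        # ClassicTestDriver.shell runs a command for this testcase and
        # captures its output for the report.
        self.shell(self.test_env["cmd"])


class MySuite(Testsuite):
    # Concrete values for the overridable hooks: plain class attributes
    # shadow the base class properties.
    tests_subdir = "tests"
    test_driver_map = {"shell": ShellDriver}
    default_driver = "shell"


if __name__ == "__main__":
    sys.exit(MySuite().testsuite_main())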
Example No. 12
        qualifier=m.args.qualifier,
        sandbox=sbx,
        upload=False,
        env=BaseEnv.from_env(),
    ).anod_instance

    saved_env = {k: v for k, v in os.environ.items()}

    if m.args.build_env:
        if hasattr(anod_instance, "build_setenv"):
            anod_instance.build_setenv()
    else:
        if hasattr(anod_instance, "setenv"):
            anod_instance.setenv()

    for var, value in os.environ.items():
        if var not in saved_env or saved_env[var] != os.environ[var]:
            print('export %s="%s";' % (var, value))

            if m.args.verbose >= 1:
                print('printf "I set %s=\\"%s\\"\\n\\n";' % (var, value))

            print(" ")

    print(BANNER % m.args.spec_name)
    return 0


if __name__ == "__main__":
    exit(do_printenv(Main(), set_prog=False))
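
The printenv script above relies on a simple environment-diff idiom: snapshot os.environ, let setenv()/build_setenv() mutate it, then emit shell export statements only for the variables that changed. The idiom in isolation, as a self-contained sketch (print_env_diff and MY_TOOL_PATH are illustrative names):

import os


def print_env_diff(mutate):
    """Call mutate(), then print export lines for the variables it changed."""
    saved_env = dict(os.environ)
    mutate()
    for var, value in os.environ.items():
        if saved_env.get(var) != value:
            print('export %s="%s";' % (var, value))


print_env_diff(lambda: os.environ.update({"MY_TOOL_PATH": "/opt/tool"}))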
Example No. 13
class TestsuiteCore:
    """Testsuite Core driver.

    This class is the base of the Testsuite class and should not be
    instantiated. It's not recommended to override any of the functions
    declared in it.

    See documentation of Testsuite class for overridable methods and
    variables.
    """
    def __init__(self,
                 root_dir: Optional[str] = None,
                 testsuite_name: str = "Untitled testsuite") -> None:
        """Testsuite constructor.

        :param root_dir: Root directory for the testsuite. If left to None, use
            the directory containing the Python module that created self's
            class.
        :param testsuite_name: Name for this testsuite. It can be used to
            provide a title in some report formats.
        """
        if root_dir is None:
            root_dir = os.path.dirname(inspect.getfile(type(self)))
        self.root_dir = os.path.abspath(root_dir)
        self.test_dir = os.path.join(self.root_dir, self.tests_subdir)
        logger.debug("Test directory: %s", self.test_dir)
        self.consecutive_failures = 0
        self.return_values: Dict[str, Any] = {}
        self.result_tracebacks: Dict[str, List[str]] = {}
        self.testsuite_name = testsuite_name

        self.aborted_too_many_failures = False
        """
        Whether the testsuite aborted because of too many consecutive test
        failures (see the --max-consecutive-failures command-line option).
        """

    # Mypy does not support decorators on properties, so keep the actual
    # implementations for deprecated properties in methods.

    @deprecated(2)
    def _test_counter(self) -> int:
        return len(self.report_index.entries)

    @deprecated(2)
    def _test_status_counters(self) -> Dict[TestStatus, int]:
        return self.report_index.status_counters

    @deprecated(2)
    def _results(self) -> Dict[str, TestStatus]:
        return {
            e.test_name: e.status
            for e in self.report_index.entries.values()
        }

    @property
    def test_counter(self) -> int:
        """Return the number of test results in the report.

        Warning: this property is obsolete and will be removed in the future.
        """
        return self._test_counter()

    @property
    def test_status_counters(self) -> Dict[TestStatus, int]:
        """Return test result counts per test status.

        Warning: this property is obsolete and will be removed in the future.
        """
        return self._test_status_counters()

    @property
    def results(self) -> Dict[str, TestStatus]:
        """Return a mapping from test names to results.

        Warning: this property is obsolete and will be removed in the future.
        """
        return self._results()

    def test_result_filename(self, test_name: str) -> str:
        """Return the name of the file in which the result are stored.

        :param test_name: Name of the test for this result file.
        """
        return os.path.join(self.output_dir, test_name + ".yaml")

    def job_factory(self, uid: str, data: Any, predecessors: FrozenSet[str],
                    notify_end: Callable[[str], None]) -> TestFragment:
        """Run internal function.

        See e3.job.scheduler
        """
        # We assume that data[0] is the test instance and data[1] the method
        # to call.

        # When passing return values from predecessors, remove the current test
        # name from the keys to ease referencing by users (the short fragment
        # name can then be used without knowing the full node id).
        key_prefix = data[0].test_name + "."
        key_prefix_len = len(key_prefix)

        def filter_key(k: str) -> str:
            if k.startswith(key_prefix):
                return k[key_prefix_len:]
            else:
                return k

        return TestFragment(
            uid,
            data[0],
            data[1],
            {filter_key(k): self.return_values[k]
             for k in predecessors},
            notify_end,
        )

    def testsuite_main(self, args: Optional[List[str]] = None) -> int:
        """Main for the main testsuite script.

        :param args: Command line arguments. If None, use `sys.argv`.
        :return: The testsuite status code (0 for success, a positive value
            for failure).
        """
        self.main = Main(platform_args=True)

        # Add common options
        parser = self.main.argument_parser
        parser.add_argument(
            "-o",
            "--output-dir",
            metavar="DIR",
            default="./out",
            help="select output dir",
        )
        parser.add_argument("-t",
                            "--temp-dir",
                            metavar="DIR",
                            default=Env().tmp_dir)
        parser.add_argument(
            "-d",
            "--dev-temp",
            nargs="?",
            default=None,
            const="tmp",
            help="Unlike --temp-dir, use this very directory to store"
            " testsuite temporaries (i.e. no random subdirectory). Also"
            " automatically disable temp dir cleanup, to be developer"
            " friendly. If no directory is provided, use the local"
            " \"tmp\" directory")
        parser.add_argument(
            "--max-consecutive-failures",
            "-M",
            metavar="N",
            type=int,
            default=self.default_max_consecutive_failures,
            help="Number of test failures (FAIL or ERROR) that trigger the"
            " abortion of the testuite. If zero, this behavior is disabled. In"
            " some cases, aborting the testsuite when there are just too many"
            " failures saves time and costs: the software to test/environment"
            " is too broken, there is no point to continue running the"
            " testsuite.")
        parser.add_argument(
            "--keep-old-output-dir",
            default=False,
            action="store_true",
            help="This is default with this testsuite framework. The option"
            " is kept only to keep backward compatibility of invocation with"
            " former framework (gnatpython.testdriver)",
        )
        parser.add_argument(
            "--disable-cleanup",
            dest="enable_cleanup",
            action="store_false",
            default=True,
            help="disable cleanup of working space",
        )
        parser.add_argument(
            "-j",
            "--jobs",
            dest="jobs",
            type=int,
            metavar="N",
            default=Env().build.cpu.cores,
            help="Specify the number of jobs to run simultaneously",
        )
        parser.add_argument(
            "--show-error-output",
            "-E",
            action="store_true",
            help="When testcases fail, display their output. This is for"
            " convenience for interactive use.",
        )
        parser.add_argument(
            "--show-time-info",
            action="store_true",
            help="Display time information for test results, if available")
        parser.add_argument(
            "--dump-environ",
            dest="dump_environ",
            action="store_true",
            default=False,
            help="Dump all environment variables in a file named environ.sh,"
            " located in the output directory (see --output-dir). This"
            " file can then be sourced from a Bourne shell to recreate"
            " the environement that existed when this testsuite was run"
            " to produce a given testsuite report.",
        )
        parser.add_argument(
            "--xunit-output",
            dest="xunit_output",
            metavar="FILE",
            help="Output testsuite report to the given file in the standard"
            " XUnit XML format. This is useful to display results in"
            " continuous build systems such as Jenkins.",
        )
        parser.add_argument(
            "--gaia-output",
            action="store_true",
            help="Output a GAIA-compatible testsuite report next to the YAML"
            " report.")
        parser.add_argument(
            "--truncate-logs",
            "-T",
            metavar="N",
            type=int,
            default=200,
            help="When outputs (for instance subprocess outputs) exceed 2*N"
            " lines, only include the first and last N lines in logs. This is"
            " necessary when storage for testsuite results have size limits,"
            " and the useful information is generally either at the beginning"
            " or the end of such outputs. If 0, never truncate logs.")
        parser.add_argument(
            "--failure-exit-code",
            metavar="N",
            type=int,
            default=self.default_failure_exit_code,
            help="Exit code the testsuite must use when at least one test"
            " result shows a failure/error. By default, this is"
            f" {self.default_failure_exit_code}. This option is useful when"
            " running a testsuite in a continuous integration setup, as this"
            " can make the testing process stop when there is a regression.")
        parser.add_argument("sublist",
                            metavar="tests",
                            nargs="*",
                            default=[],
                            help="test")
        # Add user defined options
        self.add_options(parser)

        # Parse options
        self.main.parse_args(args)
        assert self.main.args is not None

        # If there is a chance for the logging to end up in a non-tty stream,
        # disable colors. If not, be user-friendly and automatically show error
        # outputs.
        if (self.main.args.log_file or not isatty(sys.stdout)
                or not isatty(sys.stderr)):
            enable_colors = False
        else:  # interactive-only
            enable_colors = True
            self.main.args.show_error_output = True
        self.colors = ColorConfig(enable_colors)
        self.Fore = self.colors.Fore
        self.Style = self.colors.Style

        self.env = BaseEnv.from_env()
        self.env.enable_colors = enable_colors
        self.env.root_dir = self.root_dir
        self.env.test_dir = self.test_dir

        # At this stage, compute commonly used paths. Keep the working dir as
        # short as possible, to avoid the risk of having a path that's too long
        # (a problem often seen on Windows, or when using WRS tools that have
        # their own max path limitations).
        #
        # Note that we do make sure that working_dir is an absolute path, as we
        # are likely to be changing directories when running each test. A
        # relative path would no longer work under those circumstances.
        d = os.path.abspath(self.main.args.output_dir)
        self.output_dir = os.path.join(d, "new")
        self.old_output_dir = os.path.join(d, "old")

        if self.main.args.dev_temp:
            # Use a temporary directory for developers: make sure it is an
            # empty directory and disable cleanup to ease post-mortem
            # investigation.
            self.working_dir = os.path.abspath(self.main.args.dev_temp)
            rm(self.working_dir, recursive=True)
            mkdir(self.working_dir)
            self.main.args.enable_cleanup = False

        else:
            # If the temp dir is supposed to be randomized, we need to create a
            # subdirectory, so check that the parent directory exists first.
            if not os.path.isdir(self.main.args.temp_dir):
                logger.critical("temp dir '%s' does not exist",
                                self.main.args.temp_dir)
                return 1

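            # tempfile.mkdtemp(suffix, prefix, dir): create a fresh, private
            # "tmp*" subdirectory under the requested temp dir.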
            self.working_dir = tempfile.mkdtemp(
                "", "tmp", os.path.abspath(self.main.args.temp_dir))

        # Create the new output directory that will hold the results and create
        # an index for it.
        self.setup_result_dir()
        self.report_index = ReportIndex(self.output_dir)

        # Store in global env: target information and common paths
        self.env.output_dir = self.output_dir
        self.env.working_dir = self.working_dir
        self.env.options = self.main.args

        # User specific startup
        self.set_up()

        # Retrieve the list of tests
        self.has_error = False
        self.test_list = self.get_test_list(self.main.args.sublist)

        # Launch the mainloop
        self.total_test = len(self.test_list)
        self.run_test = 0

        self.scheduler = Scheduler(
            job_provider=self.job_factory,
            tokens=self.main.args.jobs,

            # collect_result expects specifically TestFragment instances (a Job
            # subclass), while Scheduler only guarantees Job instances.
            # Test drivers are supposed to register only TestFragment
            # instances, so the following cast should be fine.
            collect=cast(Any, self.collect_result),
        )
        actions = DAG()
        for parsed_test in self.test_list:
            if not self.add_test(actions, parsed_test):
                self.has_error = True
        actions.check()

        with open(os.path.join(self.output_dir, "tests.dot"), "w") as fd:
            fd.write(actions.as_dot())

        # Run the tests. Note that when the testsuite aborts because of too
        # many consecutive test failures, we still want to produce a report and
        # exit through regular ways, so catch the KeyboardInterrupt exceptions
        # that e3's scheduler uses to abort the execution loop, but only in
        # such cases. In other words, let the exception propagate if it's the
        # user that interrupted the testsuite.
        try:
            self.scheduler.run(actions)
        except KeyboardInterrupt:
            if not self.aborted_too_many_failures:  # interactive-only
                raise

        self.report_index.write()
        self.dump_testsuite_result()
        if self.main.args.xunit_output:
            dump_xunit_report(self, self.main.args.xunit_output)
        if self.main.args.gaia_output:
            dump_gaia_report(self, self.output_dir)

        # Clean everything
        self.tear_down()

        # Return the appropriate status code: 1 when there is a framework
        # issue, the failure status code from the --failure-exit-code=N option
        # when there is at least one testcase failure, or 0.
        statuses = {
            s
            for s, count in self.report_index.status_counters.items() if count
        }
        if self.has_error:
            return 1
        elif TestStatus.FAIL in statuses or TestStatus.ERROR in statuses:
            return self.main.args.failure_exit_code
        else:
            return 0

    def get_test_list(self, sublist: List[str]) -> List[ParsedTest]:
        """Retrieve the list of tests.

        :param sublist: A list of test scenarios or patterns.
        """
        # Use a mapping: test name -> ParsedTest when building the result, as
        # several patterns in "sublist" may yield the same testcase.
        testcases: Dict[str, ParsedTest] = {}
        test_finders = self.test_finders

        def add_testcase(test: ParsedTest) -> None:
            testcases[test.test_name] = test

        def helper(spec: str) -> None:
            pattern: Optional[Pattern[str]] = None

            # If the given pattern is a directory, do not go through the whole
            # tests subdirectory.
            if os.path.isdir(spec):
                root = spec
            else:
                root = self.test_dir
                try:
                    pattern = re.compile(spec)
                except re.error as exc:
                    logger.debug(
                        "Test pattern is not a valid regexp, try to match it"
                        " as-is: {}".format(exc))
                    pattern = re.compile(re.escape(spec))

            # For each directory in the requested subdir, ask our test finders
            # to probe for a testcase. Register matches.
            for dirpath, dirnames, filenames in os.walk(root,
                                                        followlinks=True):
                # If the directory name does not match the given pattern, skip
                # it.
                if pattern is not None and not pattern.search(dirpath):
                    continue

                # The first test finder that has a match "wins". When handling
                # test data, we want to deal only with absolute paths, so get
                # the absolute name now.
                dirpath = os.path.abspath(dirpath)
                for tf in test_finders:
                    try:
                        test_or_list = tf.probe(self, dirpath, dirnames,
                                                filenames)
                    except ProbingError as exc:
                        self.has_error = True
                        logger.error(str(exc))
                        break
                    if isinstance(test_or_list, list):
                        for t in test_or_list:
                            add_testcase(t)
                        break
                    elif test_or_list is not None:
                        add_testcase(test_or_list)
                        break

        # If specific tests are requested, only look for them. Otherwise, just
        # look in the tests subdirectory.
        if sublist:
            for s in sublist:
                helper(s)
        else:
            helper(self.test_dir)

        result = list(testcases.values())
        logger.info("Found {} tests".format(len(result)))
        logger.debug("tests:\n  " + "\n  ".join(t.test_dir for t in result))
        return result

    def add_test(self, actions: DAG, parsed_test: ParsedTest) -> bool:
        """Register a test to run.

        :param actions: The dag of actions for the testsuite.
        :param parsed_test: Test to instantiate.
        :return: Whether the test was successfully registered.
        """
        test_name = parsed_test.test_name

        # Complete the test environment
        test_env = dict(parsed_test.test_env)
        test_env["test_dir"] = parsed_test.test_dir
        test_env["test_name"] = test_name

        assert isinstance(self.env.working_dir, str)
        test_env["working_dir"] = os.path.join(self.env.working_dir, test_name)

        # Fetch the test driver to use
        driver = parsed_test.driver_cls
        if not driver:
            if self.default_driver:
                driver = self.test_driver_map[self.default_driver]
            else:
                logger.error("missing driver for test '{}'".format(test_name))
                return False

        # Finally, instantiate the test driver
        try:
            instance = driver(self.env, test_env)
            instance.Fore = self.Fore
            instance.Style = self.Style
            instance.add_test(actions)

        except Exception as e:
            error_msg = str(e)
            error_msg += "\nTraceback:\n"
            error_msg += "\n".join(traceback.format_tb(sys.exc_info()[2]))
            logger.error(error_msg)
            return False

        return True

    def dump_testsuite_result(self) -> None:
        """Log a summary of test results.

        Subclasses are free to override this to do whatever is suitable for
        them.
        """
        lines = ['Summary:']

        # Display test count for each status, but only for statuses that have
        # at least one test. Sort them by status value, to get consistent
        # order.
        def sort_key(couple: Tuple[TestStatus, int]) -> Any:
            status, _ = couple
            return status.value

        stats = sorted(
            ((status, count)
             for status, count in self.report_index.status_counters.items()
             if count),
            key=sort_key)
        for status, count in stats:
            lines.append('  {}{: <12}{} {}'.format(status.color(self.colors),
                                                   status.name,
                                                   self.Style.RESET_ALL,
                                                   count))
        if not stats:
            lines.append('  <no test result>')
        logger.info('\n'.join(lines))

        # Dump the comment file
        with open(os.path.join(self.output_dir, "comment"), "w") as f:
            self.write_comment_file(f)

    def collect_result(self, job: TestFragment) -> bool:
        """Run internal function.

        :param job: A job that is finished.
        """
        assert self.main.args

        # Keep track of the number of consecutive failures seen so far: if it
        # reaches the maximum number allowed, we must abort the testsuite.
        max_consecutive_failures = self.main.args.max_consecutive_failures

        self.return_values[job.uid] = job.return_value

        while job.test_instance.result_queue:
            result, tb = job.test_instance.result_queue.pop()

            # The test results that reach this point are special: they were
            # serialized/deserialized through YAML, so the Log layer
            # disappeared.
            assert result.status is not None

            # Log the test result. If error output is requested and the test
            # failed unexpectedly, show the detailed logs.
            log_line = summary_line(result, self.colors,
                                    self.main.args.show_time_info)
            if (self.main.args.show_error_output and result.status
                    not in (TestStatus.PASS, TestStatus.XFAIL,
                            TestStatus.XPASS, TestStatus.SKIP)):

                def format_log(log: Log) -> str:
                    return "\n" + str(log) + self.Style.RESET_ALL

                if result.diff:
                    log_line += format_log(result.diff)
                else:
                    log_line += format_log(result.log)
            logger.info(log_line)

            def indented_tb(tb: List[str]) -> str:
                return "".join("  {}".format(line) for line in tb)

            assert result.test_name not in self.report_index.entries, (
                "cannot push twice results for {}"
                "\nFirst push happened at:"
                "\n{}"
                "\nThis one happened at:"
                "\n{}".format(
                    result.test_name,
                    indented_tb(self.result_tracebacks[result.test_name]),
                    indented_tb(tb),
                ))
            with open(self.test_result_filename(result.test_name), "w") as fd:
                yaml.dump(result, fd)
            self.report_index.add_result(result)
            self.result_tracebacks[result.test_name] = tb

            # Update the number of consecutive failures, aborting the testsuite
            # if appropriate
            if result.status in (TestStatus.ERROR, TestStatus.FAIL):
                self.consecutive_failures += 1
                if (max_consecutive_failures > 0 and
                        self.consecutive_failures >= max_consecutive_failures):
                    self.aborted_too_many_failures = True
                    logger.error(
                        "Too many consecutive failures, aborting the testsuite"
                    )
                    raise KeyboardInterrupt
            else:
                self.consecutive_failures = 0

        return False

    def setup_result_dir(self) -> None:
        """Create the output directory in which the results are stored."""
        assert self.main.args

        if os.path.isdir(self.old_output_dir):
            rm(self.old_output_dir, True)
        if os.path.isdir(self.output_dir):
            mv(self.output_dir, self.old_output_dir)
        mkdir(self.output_dir)

        if self.main.args.dump_environ:
            with open(os.path.join(self.output_dir, "environ.sh"), "w") as f:
                for var_name in sorted(os.environ):
                    f.write("export {}={}\n".format(
                        var_name, quote_arg(os.environ[var_name])))

    # Unlike the previous methods, the following ones are supposed to be
    # overridden.

    @property
    def tests_subdir(self) -> str:
        """
        Return the subdirectory in which tests are looked for.

        The returned directory name is considered relative to the root
        testsuite directory (self.root_dir).
        """
        raise NotImplementedError

    @property
    def test_driver_map(self) -> Dict[str, Type[TestDriver]]:
        """Return a map from test driver names to TestDriver subclasses.

        Test finders will be able to use this map to fetch the test drivers
        referenced in testcases.
        """
        raise NotImplementedError

    @property
    def default_driver(self) -> Optional[str]:
        """Return the name of the default driver for testcases.

        When tests do not query a specific driver, the one associated to this
        name is used instead. If this property returns None, all tests are
        required to query a driver.
        """
        raise NotImplementedError

    def test_name(self, test_dir: str) -> str:
        """Compute the test name given a testcase spec.

        This function can be overridden. By default it uses the name of the
        test directory. Note that the test name should be a valid filename (no
        directory separators, nor special characters such as ``:``, ...).
        """
        raise NotImplementedError

    @property
    def test_finders(self) -> List[TestFinder]:
        """Return test finders to probe tests directories."""
        raise NotImplementedError

    def add_options(self, parser: argparse.ArgumentParser) -> None:
        """Add testsuite specific switches.

        Subclasses can override this method to add their own testsuite
        command-line options.

        :param parser: Parser for command-line arguments. See
            <https://docs.python.org/3/library/argparse.html> for usage.
        """
        raise NotImplementedError

    def set_up(self) -> None:
        """Execute operations before running the testsuite.

        Before running this, command-line arguments were parsed. After this
        returns, the testsuite will look for testcases.

        By default, this does nothing. Overriding this method allows testsuites
        to prepare the execution of the testsuite depending on their needs. For
        instance:

        * process testsuite-specific options;
        * initialize environment variables;
        * adjust self.env (object forwarded to test drivers).
        """
        raise NotImplementedError

    def tear_down(self) -> None:
        """Execute operation when finalizing the testsuite.

        By default, this cleans the working (temporary) directory in which the
        tests were run.
        """
        raise NotImplementedError

    def write_comment_file(self, comment_file: IO[str]) -> None:
        """Write the comment file's content.

        :param comment_file: File descriptor for the comment file.  Overriding
            methods should only call its "write" method (or print to it).
        """
        raise NotImplementedError

    @property
    def default_max_consecutive_failures(self) -> int:
        """Return the default maximum number of consecutive failures.

        In some cases, aborting the testsuite when there are just too many
        failures saves time and costs: the software to test/environment is too
        broken, there is no point in continuing to run the testsuite.

        This property must return the number of test failures (FAIL or ERROR)
        that trigger the abortion of the testsuite. If zero, this behavior is
        disabled.
        """
        raise NotImplementedError

    @property
    def default_failure_exit_code(self) -> int:
        """Return the default exit code when at least one test fails."""
        raise NotImplementedError
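
The exit-status policy at the end of testsuite_main above (1 for a framework issue, --failure-exit-code when at least one test fails, 0 otherwise) is small enough to check in isolation. A sketch with a simplified stand-in for TestStatus; only the decision logic is taken from the code above:

from enum import Enum


class TestStatus(Enum):
    # Simplified stand-in for e3.testsuite.result.TestStatus.
    PASS = 0
    FAIL = 1
    ERROR = 2


def exit_status(has_error, status_counters, failure_exit_code):
    # Statuses for which at least one result was recorded.
    statuses = {s for s, count in status_counters.items() if count}
    if has_error:
        return 1
    elif TestStatus.FAIL in statuses or TestStatus.ERROR in statuses:
        return failure_exit_code
    else:
        return 0


assert exit_status(False, {TestStatus.PASS: 10}, 2) == 0
assert exit_status(False, {TestStatus.PASS: 9, TestStatus.FAIL: 1}, 2) == 2
assert exit_status(True, {TestStatus.PASS: 10}, 2) == 1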
Example No. 14
class TestsuiteCore(object):
    """Testsuite Core driver.

    This class is the base of the Testsuite class and should not be
    instantiated. It's not recommended to override any of the functions
    declared in it.

    See documentation of Testsuite class for overridable methods and
    variables.
    """
    def __init__(self, root_dir):
        """Testsuite constructor.

        :param root_dir: root dir of the testsuite. Usually the directory in
            which testsuite.py and runtest.py are located
        :type root_dir: str | unicode
        """
        self.root_dir = os.path.abspath(root_dir)
        self.test_dir = os.path.join(self.root_dir, self.TEST_SUBDIR)
        self.consecutive_failures = 0
        self.return_values = {}
        self.results = {}
        self.test_counter = 0
        self.test_status_counters = {s: 0 for s in TestStatus}

    def test_result_filename(self, test_name):
        """Return the name of the file in which the result are stored.

        :param test_case_file: path to a test case scenario relative to the
            test directory
        :type test_case_file: str | unicode
        :param variant: the test variant
        :type variant: str
        :return: the test name. Note that test names should not contain path
            separators
        :rtype: str | unicode
        """
        return os.path.join(self.output_dir, test_name + '.yaml')

    def job_factory(self, uid, data, predecessors, notify_end):
        """Run internal function.

        See e3.job.scheduler
        """
        # we assume that data[0] is the test instance and data[1] the method
        # to call

        # When passing return values from predecessors, remove the current test
        # name from the keys to ease referencing by users (the short fragment
        # name can then be used without knowing the full node id).
        key_prefix = data[0].test_name + '.'
        key_prefix_len = len(key_prefix)

        def filter_key(k):
            if k.startswith(key_prefix):
                return k[key_prefix_len:]
            else:
                return k

        return TestFragment(
            uid, data[0], data[1],
            {filter_key(k): self.return_values[k]
             for k in predecessors}, notify_end)

    def testsuite_main(self, args=None):
        """Main for the main testsuite script.

        :param args: command line arguments. If None use sys.argv
        :type args: list[str] | None
        """
        self.main = Main(platform_args=self.CROSS_SUPPORT)

        # Add common options
        parser = self.main.argument_parser
        parser.add_argument("-o",
                            "--output-dir",
                            metavar="DIR",
                            default="./out",
                            help="select output dir")
        parser.add_argument("-t",
                            "--temp-dir",
                            metavar="DIR",
                            default=Env().tmp_dir)
        parser.add_argument(
            "--max-consecutive-failures",
            metavar="N",
            type=int,
            default=0,
            help="If there are more than N consecutive failures, the testsuite"
            " is aborted. If set to 0 (default) then the testsuite will never"
            " be stopped")
        parser.add_argument(
            "--keep-old-output-dir",
            default=False,
            action="store_true",
            help="This is default with this testsuite framework. The option"
            " is kept only to keep backward compatibility of invocation with"
            " former framework (gnatpython.testdriver)")
        parser.add_argument("--disable-cleanup",
                            dest="enable_cleanup",
                            action="store_false",
                            default=True,
                            help="disable cleanup of working space")
        parser.add_argument(
            "-j",
            "--jobs",
            dest="jobs",
            type=int,
            metavar="N",
            default=Env().build.cpu.cores,
            help="Specify the number of jobs to run simultaneously")
        parser.add_argument(
            "--show-error-output",
            action="store_true",
            help="When testcases fail, display their output. This is for"
            " convenience for interactive use.")
        parser.add_argument(
            "--dump-environ",
            dest="dump_environ",
            action="store_true",
            default=False,
            help="Dump all environment variables in a file named environ.sh,"
            " located in the output directory (see --output-dir). This"
            " file can then be sourced from a Bourne shell to recreate"
            " the environement that existed when this testsuite was run"
            " to produce a given testsuite report.")
        parser.add_argument('sublist',
                            metavar='tests',
                            nargs='*',
                            default=[],
                            help='tests to run')
        # Add user defined options
        self.add_options()

        # parse options
        self.main.parse_args(args)

        self.env = BaseEnv.from_env()
        self.env.root_dir = self.root_dir
        self.env.test_dir = self.test_dir

        # At this stage compute commonly used paths
        # Keep the working dir as short as possible, to avoid the risk
        # of having a path that's too long (a problem often seen on
        # Windows, or when using WRS tools that have their own max path
        # limitations).
        # Note that we do make sure that working_dir is an absolute
        # path, as we are likely to be changing directories when
        # running each test. A relative path would no longer work
        # under those circumstances.
        d = os.path.abspath(self.main.args.output_dir)
        self.output_dir = os.path.join(d, 'new')
        self.old_output_dir = os.path.join(d, 'old')

        if not os.path.isdir(self.main.args.temp_dir):
            logging.critical("temp dir '%s' does not exist",
                             self.main.args.temp_dir)
            return 1

        self.working_dir = tempfile.mkdtemp(
            '', 'tmp', os.path.abspath(self.main.args.temp_dir))

        # Create the new output directory that will hold the results
        self.setup_result_dir()

        # Store in global env: target information and common paths
        self.env.output_dir = self.output_dir
        self.env.working_dir = self.working_dir
        self.env.options = self.main.args

        # User specific startup
        self.tear_up()

        # Retrieve the list of tests
        self.test_list = self.get_test_list(self.main.args.sublist)

        # Launch the mainloop
        self.total_test = len(self.test_list)
        self.run_test = 0

        self.scheduler = Scheduler(job_provider=self.job_factory,
                                   collect=self.collect_result,
                                   tokens=self.main.args.jobs)
        actions = DAG()
        for test in self.test_list:
            self.parse_test(actions, test)

        with open(os.path.join(self.output_dir, 'tests.dot'), 'wb') as fd:
            fd.write(actions.as_dot())
        self.scheduler.run(actions)

        self.dump_testsuite_result()

        # Clean everything
        self.tear_down()
        return 0

    def parse_test(self, actions, test_case_file):
        """Register a test.

        :param actions: the dag of actions for the testsuite
        :type actions: e3.collection.dag.DAG
        :param test_case_file: filename containing the testcase
        :type test_case_file: str
        """
        # Load testcase file
        test_env = load_with_config(
            os.path.join(self.test_dir, test_case_file),
            Env().to_dict())

        # Ensure that the test_env acts like a dictionary
        if not isinstance(test_env, collections.Mapping):
            test_env = {
                'test_name': self.test_name(test_case_file),
                'test_yaml_wrong_content': test_env
            }
            logger.error("abort test because of invalid test.yaml")
            return

        # Add to the test environment the directory in which the test.yaml is
        # stored
        test_env['test_dir'] = os.path.join(self.env.test_dir,
                                            os.path.dirname(test_case_file))
        test_env['test_case_file'] = test_case_file
        test_env['test_name'] = self.test_name(test_case_file)
        test_env['working_dir'] = os.path.join(self.env.working_dir,
                                               test_env['test_name'])

        if 'driver' in test_env:
            driver = test_env['driver']
        else:
            driver = self.default_driver

        logger.debug('set driver to %s' % driver)
        if driver not in self.DRIVERS or \
                not issubclass(self.DRIVERS[driver], TestDriver):
            logger.error('cannot find driver for %s' % test_case_file)
            return

        try:
            instance = self.DRIVERS[driver](self.env, test_env)
            instance.add_test(actions)

        except Exception as e:
            error_msg = str(e)
            error_msg += "\nTraceback:\n"
            error_msg += "\n".join(traceback.format_tb(sys.exc_info()[2]))
            logger.error(error_msg)
            return

    def dump_testsuite_result(self):
        """To be implemented."""
        pass

    def collect_result(self, job):
        """Run internal function.

        :param job: a job that is finished
        :type job: TestFragment
        """
        self.return_values[job.uid] = job.return_value
        while job.test_instance.result_queue:
            result = job.test_instance.result_queue.pop()
            logging.info('%-12s %s' % (str(result.status), result.test_name))
            assert result.test_name not in self.results, \
                'cannot push twice results for %s' % result.test_name
            with open(self.test_result_filename(result.test_name), 'wb') as fd:
                yaml.dump(result, fd)
            self.results[result.test_name] = result.status
            self.test_counter += 1
            self.test_status_counters[result.status] += 1
        return False

    def setup_result_dir(self):
        """Create the output directory in which the results are stored."""
        if os.path.isdir(self.old_output_dir):
            rm(self.old_output_dir, True)
        if os.path.isdir(self.output_dir):
            mv(self.output_dir, self.old_output_dir)
        mkdir(self.output_dir)

        if self.main.args.dump_environ:
            with open(os.path.join(self.output_dir, 'environ.sh'), 'w') as f:
                for var_name in sorted(os.environ):
                    f.write('export %s=%s\n' %
                            (var_name, quote_arg(os.environ[var_name])))
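
Both versions of job_factory strip the "<test name>." prefix from predecessor keys so that drivers can reference fragment return values by their short names. The trimming logic in isolation (the test names below are illustrative):

def filter_key(k, key_prefix):
    # Drop the fragment's own test-name prefix; leave other keys intact.
    return k[len(key_prefix):] if k.startswith(key_prefix) else k


assert filter_key("mytest.build", "mytest.") == "build"
assert filter_key("othertest.build", "mytest.") == "othertest.build"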
Ejemplo n.º 15
0
    def testsuite_main(self, args=None):
        """Main for the main testsuite script.

        :param args: command line arguments. If None use sys.argv
        :type args: list[str] | None
        """
        self.main = Main(platform_args=self.CROSS_SUPPORT)

        # Add common options
        parser = self.main.argument_parser
        parser.add_argument("-o",
                            "--output-dir",
                            metavar="DIR",
                            default="./out",
                            help="select output dir")
        parser.add_argument("-t",
                            "--temp-dir",
                            metavar="DIR",
                            default=Env().tmp_dir)
        parser.add_argument(
            "--max-consecutive-failures",
            metavar="N",
            type=int,
            default=0,
            help="If there are more than N consecutive failures, the testsuite"
            " is aborted. If set to 0 (the default), the testsuite is never"
            " stopped early")
        parser.add_argument(
            "--keep-old-output-dir",
            default=False,
            action="store_true",
            help="This is default with this testsuite framework. The option"
            " is kept only to keep backward compatibility of invocation with"
            " former framework (gnatpython.testdriver)")
        parser.add_argument("--disable-cleanup",
                            dest="enable_cleanup",
                            action="store_false",
                            default=True,
                            help="disable cleanup of working space")
        parser.add_argument(
            "-j",
            "--jobs",
            dest="jobs",
            type=int,
            metavar="N",
            default=Env().build.cpu.cores,
            help="Specify the number of jobs to run simultaneously")
        parser.add_argument(
            "--show-error-output",
            action="store_true",
            help="When testcases fail, display their output. This is for"
            " convenience for interactive use.")
        parser.add_argument(
            "--dump-environ",
            dest="dump_environ",
            action="store_true",
            default=False,
            help="Dump all environment variables in a file named environ.sh,"
            " located in the output directory (see --output-dir). This"
            " file can then be sourced from a Bourne shell to recreate"
            " the environement that existed when this testsuite was run"
            " to produce a given testsuite report.")
        parser.add_argument('sublist',
                            metavar='tests',
                            nargs='*',
                            default=[],
                            help='tests to run (run the full testsuite if'
                            ' not provided)')
        # Add user defined options
        self.add_options()

        # parse options
        self.main.parse_args(args)

        self.env = BaseEnv.from_env()
        self.env.root_dir = self.root_dir
        self.env.test_dir = self.test_dir

        # At this stage compute commonly used paths
        # Keep the working dir as short as possible, to avoid the risk
        # of having a path that's too long (a problem often seen on
        # Windows, or when using WRS tools that have their own max path
        # limitations).
        # Note that we do make sure that working_dir is an absolute
        # path, as we are likely to be changing directories when
        # running each test. A relative path would no longer work
        # under those circumstances.
        d = os.path.abspath(self.main.args.output_dir)
        self.output_dir = os.path.join(d, 'new')
        self.old_output_dir = os.path.join(d, 'old')

        if not os.path.isdir(self.main.args.temp_dir):
            logging.critical("temp dir '%s' does not exist",
                             self.main.args.temp_dir)
            return 1

        self.working_dir = tempfile.mkdtemp(
            '', 'tmp', os.path.abspath(self.main.args.temp_dir))

        # Create the new output directory that will hold the results
        self.setup_result_dir()

        # Store in global env: target information and common paths
        self.env.output_dir = self.output_dir
        self.env.working_dir = self.working_dir
        self.env.options = self.main.args

        # User specific startup
        self.tear_up()

        # Retrieve the list of tests
        self.test_list = self.get_test_list(self.main.args.sublist)

        # Launch the mainloop
        self.total_test = len(self.test_list)
        self.run_test = 0

        self.scheduler = Scheduler(job_provider=self.job_factory,
                                   collect=self.collect_result,
                                   tokens=self.main.args.jobs)
        actions = DAG()
        for test in self.test_list:
            self.parse_test(actions, test)

        # as_dot() returns str, so open the dot file in text mode
        with open(os.path.join(self.output_dir, 'tests.dot'), 'w') as fd:
            fd.write(actions.as_dot())
        self.scheduler.run(actions)

        self.dump_testsuite_result()

        # Clean everything
        self.tear_down()
        return 0
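
A sketch of the --max-consecutive-failures bookkeeping described above. The
helper below is illustrative (the framework's internal counter is not shown
in this listing): it resets on success and signals an abort once N failures
happen back to back.

def should_abort(failed, state, max_consecutive):
    """Return True once `max_consecutive` failures happen in a row."""
    state['consecutive'] = state['consecutive'] + 1 if failed else 0
    return max_consecutive > 0 and state['consecutive'] >= max_consecutive

# Example: with a limit of 2, the last result triggers the abort.
state = {'consecutive': 0}
for failed in [True, False, True, True]:
    if should_abort(failed, state, max_consecutive=2):
        print('aborting testsuite')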
Example #16
def anod():
    """bin/anod script entry point.

    This script is installed in the sandbox.
    """
    import os
    import sys

    import e3.anod.driver
    import e3.anod.loader
    import e3.anod.sandbox
    import e3.env
    import e3.store
    import e3.store.cache
    from e3.anod.error import AnodError  # used in the handler below

    sandbox_dir = os.path.abspath(os.path.join(os.path.dirname(
        sys.modules['__main__'].__file__), os.pardir))

    sandbox = e3.anod.sandbox.SandBox()
    sandbox.root_dir = sandbox_dir

    # Load the local specs
    spec_repo = e3.anod.loader.AnodSpecRepository(
        os.path.join(sandbox_dir, 'specs'))

    # Load the cache
    cache = e3.store.cache.load_cache(
        'file-cache',
        {'cache_dir': sandbox.tmp_cache_dir})

    store = e3.store.load_store(
        'http-simple-store', {}, cache)

    m = Main()
    subparsers = m.argument_parser.add_subparsers()
    anod_cmdline(subparsers, 'download', 'download a binary package')
    m.parse_args()

    action = m.args.action_name
    spec = m.args.spec
    qualifier = m.args.qualifier

    anod_cls = spec_repo.load(name=spec)
    anod_instance = anod_cls(qualifier=qualifier,
                             kind=action,
                             jobs=1,
                             env=e3.env.BaseEnv.from_env())

    # ??? inject the sandbox
    anod_instance.sandbox = sandbox

    driver = e3.anod.driver.AnodDriver(
        anod_instance=anod_instance,
        store=store)

    try:
        driver.activate()
        driver.call(action)
    except AnodError as err:
        print(err, file=sys.stderr)
        sys.exit(1)
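
The `anod_cmdline` helper used above is not part of this listing. A
hypothetical sketch of what it could look like, inferred from how
`m.args.action_name`, `m.args.spec` and `m.args.qualifier` are read back
(argument names and defaults here are assumptions):

def anod_cmdline(subparsers, action_name, help_msg):
    """Register one anod subcommand on the given subparsers object."""
    parser = subparsers.add_parser(action_name, help=help_msg)
    parser.set_defaults(action_name=action_name)
    parser.add_argument('spec', help='anod spec name (file basename)')
    parser.add_argument('--qualifier', help='optional qualifier')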
Example #17
    def testsuite_main(self, args: Optional[List[str]] = None) -> int:
        """Main for the main testsuite script.

        :param args: Command line arguments. If None, use `sys.argv`.
        :return: The testsuite status code (0 for success, a positive value
            for failure).
        """
        self.main = Main(platform_args=True)

        # Add common options
        parser = self.main.argument_parser
        parser.add_argument(
            "-o",
            "--output-dir",
            metavar="DIR",
            default="./out",
            help="select output dir",
        )
        parser.add_argument("-t",
                            "--temp-dir",
                            metavar="DIR",
                            default=Env().tmp_dir)
        parser.add_argument(
            "-d",
            "--dev-temp",
            nargs="?",
            default=None,
            const="tmp",
            help="Unlike --temp-dir, use this very directory to store"
            " testsuite temporaries (i.e. no random subdirectory). Also"
            " automatically disable temp dir cleanup, to be developer"
            " friendly. If no directory is provided, use the local"
            " \"tmp\" directory")
        parser.add_argument(
            "--max-consecutive-failures",
            "-M",
            metavar="N",
            type=int,
            default=self.default_max_consecutive_failures,
            help="Number of test failures (FAIL or ERROR) that trigger the"
            " abortion of the testuite. If zero, this behavior is disabled. In"
            " some cases, aborting the testsuite when there are just too many"
            " failures saves time and costs: the software to test/environment"
            " is too broken, there is no point to continue running the"
            " testsuite.")
        parser.add_argument(
            "--keep-old-output-dir",
            default=False,
            action="store_true",
            help="This is default with this testsuite framework. The option"
            " is kept only to keep backward compatibility of invocation with"
            " former framework (gnatpython.testdriver)",
        )
        parser.add_argument(
            "--disable-cleanup",
            dest="enable_cleanup",
            action="store_false",
            default=True,
            help="disable cleanup of working space",
        )
        parser.add_argument(
            "-j",
            "--jobs",
            dest="jobs",
            type=int,
            metavar="N",
            default=Env().build.cpu.cores,
            help="Specify the number of jobs to run simultaneously",
        )
        parser.add_argument(
            "--show-error-output",
            "-E",
            action="store_true",
            help="When testcases fail, display their output. This is for"
            " convenience for interactive use.",
        )
        parser.add_argument(
            "--show-time-info",
            action="store_true",
            help="Display time information for test results, if available")
        parser.add_argument(
            "--dump-environ",
            dest="dump_environ",
            action="store_true",
            default=False,
            help="Dump all environment variables in a file named environ.sh,"
            " located in the output directory (see --output-dir). This"
            " file can then be sourced from a Bourne shell to recreate"
            " the environement that existed when this testsuite was run"
            " to produce a given testsuite report.",
        )
        parser.add_argument(
            "--xunit-output",
            dest="xunit_output",
            metavar="FILE",
            help="Output testsuite report to the given file in the standard"
            " XUnit XML format. This is useful to display results in"
            " continuous build systems such as Jenkins.",
        )
        parser.add_argument(
            "--gaia-output",
            action="store_true",
            help="Output a GAIA-compatible testsuite report next to the YAML"
            " report.")
        parser.add_argument(
            "--truncate-logs",
            "-T",
            metavar="N",
            type=int,
            default=200,
            help="When outputs (for instance subprocess outputs) exceed 2*N"
            " lines, only include the first and last N lines in logs. This is"
            " necessary when storage for testsuite results have size limits,"
            " and the useful information is generally either at the beginning"
            " or the end of such outputs. If 0, never truncate logs.")
        parser.add_argument(
            "--failure-exit-code",
            metavar="N",
            type=int,
            default=self.default_failure_exit_code,
            help="Exit code the testsuite must use when at least one test"
            " result shows a failure/error. By default, this is"
            f" {self.default_failure_exit_code}. This option is useful when"
            " running a testsuite in a continuous integration setup, as this"
            " can make the testing process stop when there is a regression.")
        parser.add_argument("sublist",
                            metavar="tests",
                            nargs="*",
                            default=[],
                            help="test")
        # Add user defined options
        self.add_options(parser)

        # Parse options
        self.main.parse_args(args)
        assert self.main.args is not None

        # If there is a chance for the logging to end up in a non-tty stream,
        # disable colors. If not, be user-friendly and automatically show error
        # outputs.
        if (self.main.args.log_file or not isatty(sys.stdout)
                or not isatty(sys.stderr)):
            enable_colors = False
        else:  # interactive-only
            enable_colors = True
            self.main.args.show_error_output = True
        self.colors = ColorConfig(enable_colors)
        self.Fore = self.colors.Fore
        self.Style = self.colors.Style

        self.env = BaseEnv.from_env()
        self.env.enable_colors = enable_colors
        self.env.root_dir = self.root_dir
        self.env.test_dir = self.test_dir

        # At this stage, compute commonly used paths. Keep the working dir as
        # short as possible, to avoid the risk of having a path that's too long
        # (a problem often seen on Windows, or when using WRS tools that have
        # their own max path limitations).
        #
        # Note that we do make sure that working_dir is an absolute path, as we
        # are likely to be changing directories when running each test. A
        # relative path would no longer work under those circumstances.
        d = os.path.abspath(self.main.args.output_dir)
        self.output_dir = os.path.join(d, "new")
        self.old_output_dir = os.path.join(d, "old")

        if self.main.args.dev_temp:
            # Use a temporary directory for developers: make sure it is an
            # empty directory and disable cleanup to ease post-mortem
            # investigation.
            self.working_dir = os.path.abspath(self.main.args.dev_temp)
            rm(self.working_dir, recursive=True)
            mkdir(self.working_dir)
            self.main.args.enable_cleanup = False

        else:
            # If the temp dir is supposed to be randomized, we need to create a
            # subdirectory, so check that the parent directory exists first.
            if not os.path.isdir(self.main.args.temp_dir):
                logger.critical("temp dir '%s' does not exist",
                                self.main.args.temp_dir)
                return 1

            self.working_dir = tempfile.mkdtemp(
                "", "tmp", os.path.abspath(self.main.args.temp_dir))

        # Create the new output directory that will hold the results and create
        # an index for it.
        self.setup_result_dir()
        self.report_index = ReportIndex(self.output_dir)

        # Store in global env: target information and common paths
        self.env.output_dir = self.output_dir
        self.env.working_dir = self.working_dir
        self.env.options = self.main.args

        # User specific startup
        self.set_up()

        # Retrieve the list of tests
        self.has_error = False
        self.test_list = self.get_test_list(self.main.args.sublist)

        # Launch the mainloop
        self.total_test = len(self.test_list)
        self.run_test = 0

        self.scheduler = Scheduler(
            job_provider=self.job_factory,
            tokens=self.main.args.jobs,

            # collect_result expects specifically TestFragment instances (a Job
            # subclass), while Scheduler only guarantees Job instances.
            # Test drivers are supposed to register only TestFragment
            # instances, so the following cast should be fine.
            collect=cast(Any, self.collect_result),
        )
        actions = DAG()
        for parsed_test in self.test_list:
            if not self.add_test(actions, parsed_test):
                self.has_error = True
        actions.check()

        with open(os.path.join(self.output_dir, "tests.dot"), "w") as fd:
            fd.write(actions.as_dot())

        # Run the tests. Note that when the testsuite aborts because of too
        # many consecutive test failures, we still want to produce a report and
        # exit through regular ways, to catch KeyboardInterrupt exceptions,
        # which e3's scheduler uses to abort the execution loop, but only in
            # such cases. In other words, let the exception propagate if it's
            # the user that interrupted the testsuite.
        try:
            self.scheduler.run(actions)
        except KeyboardInterrupt:
            if not self.aborted_too_many_failures:  # interactive-only
                raise

        self.report_index.write()
        self.dump_testsuite_result()
        if self.main.args.xunit_output:
            dump_xunit_report(self, self.main.args.xunit_output)
        if self.main.args.gaia_output:
            dump_gaia_report(self, self.output_dir)

        # Clean everything
        self.tear_down()

        # Return the appropriate status code: 1 when there is a framework
        # issue, the failure status code from the --failure-exit-code=N option
        # when there is at least one testcase failure, or 0.
        statuses = {
            s
            for s, count in self.report_index.status_counters.items() if count
        }
        if self.has_error:
            return 1
        elif TestStatus.FAIL in statuses or TestStatus.ERROR in statuses:
            return self.main.args.failure_exit_code
        else:
            return 0
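
A sketch of the --truncate-logs rule described above, as an illustrative
helper rather than the framework's actual implementation: once an output
exceeds 2*N lines, keep only the first and last N lines.

def truncate_lines(lines, n):
    """Keep the first and last n lines of `lines`; n == 0 disables."""
    if n == 0 or len(lines) <= 2 * n:
        return lines
    skipped = len(lines) - 2 * n
    return lines[:n] + ['... %d lines skipped ...' % skipped] + lines[-n:]

# Example: a 6-line log with n=2 keeps 2 + 2 lines plus a marker.
print(truncate_lines([str(i) for i in range(6)], 2))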
Example #18
def add_package_cmd(options):
    # The original listing is truncated here; the function name and setup
    # lines below are reconstructed to mirror remove_package_cmd.
    cm = CygwinMirror(options.download_dir)
    si = CygwinSetupIni(cm, options.download_dir, options.version)
    c = Cygwin(si)
    c.add_package(options.package)
    if not options.dry_run:
        c.install_all()


def remove_package_cmd(options):
    cm = CygwinMirror(options.download_dir)
    si = CygwinSetupIni(cm, options.download_dir, options.version)
    c = Cygwin(si)
    c.remove_package(options.package)
    if not options.dry_run:
        c.install_all()


if __name__ == '__main__':
    m = Main()
    parser = m.argument_parser
    parser.description = DESCRIPTION

    # If there is already an installation, make sure we use the same
    # version (32-bit or 64-bit)
    if os.path.isfile('c:/cygwin/etc/rebase.db.x86_64'):
        default_version = 'x86_64'
    elif os.path.isfile('c:/cygwin/etc/rebase.db.x86'):
        default_version = 'x86'
    else:
        default_version = 'x86_64'

    parser.add_argument(
        "--download-dir",
        default='c:\\cygwin_packages',