def add_plan_action(self, plan_action_env: PlanActionEnv, sandbox: Optional[SandBox] = None) -> Optional[Action]:
    """Add an Anod action to the context.

    :param plan_action_env: the PlanActionEnv object as returned by PlanContext
    :param sandbox: the SandBox object that will be used to run commands
    :return: the root added action or None if this is not an anod action
    """
    # Only plan entries named anod_<primitive> that carry a module are
    # relevant here; anything else is not an anod action.
    plan_action = plan_action_env.action
    if plan_action_env.module is None or not plan_action.startswith("anod_"):
        return None

    # Strip the leading "anod_" prefix to recover the primitive name.
    anod_primitive = plan_action.replace("anod_", "", 1)

    # Pick the default build environment unless the plan entry requests a
    # specific one.
    if plan_action_env.default_build:
        action_env = self.default_env
    else:
        action_env = BaseEnv.from_env(plan_action_env)

    return self.add_anod_action(
        name=plan_action_env.module,
        env=action_env,
        primitive=anod_primitive,
        qualifier=plan_action_env.qualifier,
        source_packages=plan_action_env.source_packages,
        upload=plan_action_env.push_to_store,
        plan_line=plan_action_env.plan_line,
        plan_args=plan_action_env.plan_args,
        sandbox=sandbox,
    )
def do_printenv(m: Main, set_prog: bool = True) -> int:
    """Print the environment for the given spec."""
    parser = m.argument_parser
    if set_prog:
        parser.prog += " printenv"
    parser.add_argument(
        "spec_name",
        help="spec to build. This is "
        "the basename of an .anod file (without the extension)",
    )
    parser.add_argument("--qualifier", help="optional qualifier")
    parser.add_argument(
        "--sandbox-dir",
        help="directory in which build artifacts are stored",
        default=SBX_DIR,
    )
    parser.add_argument(
        "--build-env",
        help="print build environment",
        action="store_true",
        default=False,
    )
    m.parse_args()

    # Disable logging messages except errors
    logging.getLogger("").setLevel(logging.ERROR)

    check_common_tools()

    context = create_anod_context(SPEC_DIR)
    sandbox = create_anod_sandbox(m.args.sandbox_dir, SPEC_DIR)
    anod_instance = context.add_anod_action(
        name=m.args.spec_name,
        primitive="build",
        qualifier=m.args.qualifier,
        sandbox=sandbox,
        upload=False,
        env=BaseEnv.from_env(),
    ).anod_instance

    # Snapshot the environment so that only variables changed or added by
    # the setenv primitives are reported below.
    env_before = dict(os.environ)

    if m.args.build_env:
        if hasattr(anod_instance, "build_setenv"):
            anod_instance.build_setenv()
    elif hasattr(anod_instance, "setenv"):
        anod_instance.setenv()

    for var, value in os.environ.items():
        # Emit only new or modified variables, as shell export statements.
        if env_before.get(var, object()) != value:
            print('export %s="%s";' % (var, value))
            if m.args.verbose >= 1:
                print('printf "I set %s=\\"%s\\"\\n\\n";' % (var, value))
    print(" ")
    print(BANNER % m.args.spec_name)
    return 0
def add_plan_action(self, plan_action_env: PlanActionEnv, sandbox: Optional[SandBox] = None) -> Optional[Action]:
    """Add an Anod action to the context.

    :param plan_action_env: the PlanActionEnv object as returned by PlanContext
    :param sandbox: the SandBox object that will be used to run commands
    :return: the root added action or None if this is not an anod action
        or if the primitive is not supported
    """
    action_name = plan_action_env.action
    if not action_name.startswith(
            "anod_") or plan_action_env.module is None:
        return None

    primitive = action_name.replace("anod_", "", 1)
    # Reject plan entries whose primitive is not one of the supported Anod
    # primitives. A membership test replaces the original chain of `!=`
    # comparisons; the warning message also had a typo ("primtive").
    if primitive not in ("build", "install", "test", "source"):
        logger.warning(f"Unknown primitive {primitive}")
        return None
    elif TYPE_CHECKING:
        # Narrow the str down to the PRIMITIVE literal type for mypy only.
        primitive = cast(PRIMITIVE, primitive)
    return self.add_anod_action(
        name=plan_action_env.module,
        env=self.default_env if plan_action_env.default_build
        else BaseEnv.from_env(plan_action_env),
        primitive=primitive,
        qualifier=plan_action_env.qualifier,
        source_packages=plan_action_env.source_packages,
        upload=plan_action_env.push_to_store,
        plan_line=plan_action_env.plan_line,
        plan_args=plan_action_env.plan_args,
        sandbox=sandbox,
    )
def do_build(m: Main, set_prog: bool = True) -> int:
    """Perform the build."""
    parser = m.argument_parser
    if set_prog:
        parser.prog += " build"
    parser.add_argument(
        "spec_name",
        help="spec to build. This is "
        "the basename of an .anod file (without the extension)",
    )
    parser.add_argument("--qualifier", help="optional qualifier")
    parser.add_argument(
        "--sandbox-dir",
        help="directory in which build artefacts are stored",
        default=SBX_DIR,
    )
    parser.add_argument(
        "--force",
        help="force rebuild of everything",
        action="store_true",
        default=False,
    )
    m.parse_args()

    check_common_tools()

    context = create_anod_context(SPEC_DIR)
    sandbox = create_anod_sandbox(m.args.sandbox_dir, SPEC_DIR)
    sandbox.create_dirs()

    context.add_anod_action(
        name=m.args.spec_name,
        primitive="build",
        qualifier=m.args.qualifier,
        sandbox=sandbox,
        upload=False,
        env=BaseEnv.from_env(),
    )
    scheduled = context.schedule(resolver=context.always_create_source_resolver)
    walker = UxasBuilder(scheduled, sandbox=sandbox, force=m.args.force)

    # TODO: something with walker.job_status['root'], assuming we can get a
    # useful value there. Right now, it's always 'unknown'
    #
    # In the meantime, python > 3.6 guarantees the order of keys in a dict.
    # The job_status dict has as its penultimate entry the thing we asked to
    # build or the last thing that failed (the last non-root node). It's ugly,
    # but _should_ be safe to use this, until we have resolution for root
    # always reporting unknown.
    result: ReturnValue = list(walker.job_status.values())[-2]
    return 0 if result in BUILD_SUCCESS else result.value
def activate(self, sandbox: SandBox, spec_repository: AnodSpecRepository) -> None:
    """Bind the spec instance to the sandbox and activate its dependencies."""
    instance = self.anod_instance
    instance.bind_to_sandbox(sandbox)
    instance.log = e3.log.getLogger("spec." + instance.uid)

    # Instantiate every declared dependency of the current primitive
    # (e.g. build_deps for a build) and register it under its local name.
    for dep in getattr(instance, "%s_deps" % instance.kind, ()):
        if not isinstance(dep, instance.Dependency):
            continue
        dep_class = spec_repository.load(dep.name)
        instance.deps[dep.local_name] = dep_class(
            qualifier=dep.qualifier,
            kind=dep.kind,
            env=dep.env(instance, BaseEnv.from_env()),
        )
    e3.log.debug("activating spec %s", instance.uid)
def activate(self, sandbox, spec_repository):
    """Bind the spec instance to its build space and activate dependencies."""
    instance = self.anod_instance
    instance.build_space = sandbox.get_build_space(
        name=instance.build_space_name,
        primitive=instance.kind,
        platform=instance.env.platform,
    )
    instance.log = e3.log.getLogger('spec.' + instance.uid)

    # Instantiate every declared dependency of the current primitive
    # (e.g. build_deps for a build) and register it under its local name.
    for dep in getattr(instance, '%s_deps' % instance.kind, ()):
        if not isinstance(dep, instance.Dependency):
            continue
        dep_class = spec_repository.load(dep.name)
        instance.deps[dep.local_name] = dep_class(
            qualifier=dep.qualifier,
            kind=dep.kind,
            env=dep.env(instance, BaseEnv.from_env()),
        )
    e3.log.debug('activating spec %s', instance.uid)
def testsuite_main(self, args=None):
    """Main for the main testsuite script.

    :param args: command line arguments. If None use sys.argv
    :type args: list[str] | None

    :return: 0 on success, 1 if the temporary directory does not exist
    :rtype: int
    """
    self.main = Main(platform_args=self.CROSS_SUPPORT)

    # Add common options
    parser = self.main.argument_parser
    parser.add_argument("-o", "--output-dir",
                        metavar="DIR",
                        default="./out",
                        help="select output dir")
    parser.add_argument("-t", "--temp-dir",
                        metavar="DIR",
                        default=Env().tmp_dir)
    parser.add_argument(
        "--max-consecutive-failures",
        default=0,
        help="If there are more than N consecutive failures, the testsuite"
        " is aborted. If set to 0 (default) then the testsuite will never"
        " be stopped")
    parser.add_argument(
        "--keep-old-output-dir",
        default=False,
        action="store_true",
        help="This is default with this testsuite framework. The option"
        " is kept only to keep backward compatibility of invocation with"
        " former framework (gnatpython.testdriver)")
    parser.add_argument("--disable-cleanup",
                        dest="enable_cleanup",
                        action="store_false",
                        default=True,
                        help="disable cleanup of working space")
    parser.add_argument(
        "-j", "--jobs",
        dest="jobs",
        type=int,
        metavar="N",
        default=Env().build.cpu.cores,
        help="Specify the number of jobs to run simultaneously")
    parser.add_argument(
        "--show-error-output",
        action="store_true",
        help="When testcases fail, display their output. This is for"
        " convenience for interactive use.")
    parser.add_argument(
        "--dump-environ",
        dest="dump_environ",
        action="store_true",
        default=False,
        help="Dump all environment variables in a file named environ.sh,"
        " located in the output directory (see --output-dir). This"
        " file can then be sourced from a Bourne shell to recreate"
        " the environement that existed when this testsuite was run"
        " to produce a given testsuite report.")
    parser.add_argument('sublist', metavar='tests', nargs='*',
                        default=[], help='test')

    # Add user defined options
    self.add_options()

    # parse options
    self.main.parse_args(args)

    self.env = BaseEnv.from_env()
    self.env.root_dir = self.root_dir
    self.env.test_dir = self.test_dir

    # At this stage compute commonly used paths
    # Keep the working dir as short as possible, to avoid the risk
    # of having a path that's too long (a problem often seen on
    # Windows, or when using WRS tools that have their own max path
    # limitations).
    # Note that we do make sure that working_dir is an absolute
    # path, as we are likely to be changing directories when
    # running each test. A relative path would no longer work
    # under those circumstances.
    d = os.path.abspath(self.main.args.output_dir)
    self.output_dir = os.path.join(d, 'new')
    self.old_output_dir = os.path.join(d, 'old')

    if not os.path.isdir(self.main.args.temp_dir):
        logging.critical("temp dir '%s' does not exist",
                         self.main.args.temp_dir)
        return 1
    self.working_dir = tempfile.mkdtemp(
        '', 'tmp', os.path.abspath(self.main.args.temp_dir))

    # Create the new output directory that will hold the results
    self.setup_result_dir()

    # Store in global env: target information and common paths
    self.env.output_dir = self.output_dir
    self.env.working_dir = self.working_dir
    self.env.options = self.main.args

    # User specific startup
    self.tear_up()

    # Retrieve the list of test
    self.test_list = self.get_test_list(self.main.args.sublist)

    # Launch the mainloop
    self.total_test = len(self.test_list)
    self.run_test = 0

    self.scheduler = Scheduler(job_provider=self.job_factory,
                               collect=self.collect_result,
                               tokens=self.main.args.jobs)
    actions = DAG()
    for test in self.test_list:
        self.parse_test(actions, test)

    # Fix: as_dot() returns a str (the newer driver opens this same file in
    # text mode), so writing it through a file opened with 'wb' raises
    # TypeError on Python 3. Open in text mode instead.
    with open(os.path.join(self.output_dir, 'tests.dot'), 'w') as fd:
        fd.write(actions.as_dot())
    self.scheduler.run(actions)

    self.dump_testsuite_result()

    # Clean everything
    self.tear_down()
    return 0
def testsuite_main(self, args: Optional[List[str]] = None) -> int:
    """Main for the main testsuite script.

    :param args: Command line arguments. If None, use `sys.argv`.
    :return: The testsuite status code (0 for success, a positive value
        for failure).
    """
    self.main = Main(platform_args=True)

    # Add common options
    parser = self.main.argument_parser
    parser.add_argument(
        "-o",
        "--output-dir",
        metavar="DIR",
        default="./out",
        help="select output dir",
    )
    parser.add_argument("-t", "--temp-dir", metavar="DIR",
                        default=Env().tmp_dir)
    parser.add_argument(
        "-d", "--dev-temp",
        nargs="?",
        default=None,
        const="tmp",
        help="Unlike --temp-dir, use this very directory to store"
        " testsuite temporaries (i.e. no random subdirectory). Also"
        " automatically disable temp dir cleanup, to be developer"
        " friendly. If no directory is provided, use the local"
        " \"tmp\" directory")
    parser.add_argument(
        "--max-consecutive-failures", "-M",
        metavar="N",
        type=int,
        default=self.default_max_consecutive_failures,
        help="Number of test failures (FAIL or ERROR) that trigger the"
        " abortion of the testuite. If zero, this behavior is disabled. In"
        " some cases, aborting the testsuite when there are just too many"
        " failures saves time and costs: the software to test/environment"
        " is too broken, there is no point to continue running the"
        " testsuite.")
    parser.add_argument(
        "--keep-old-output-dir",
        default=False,
        action="store_true",
        help="This is default with this testsuite framework. The option"
        " is kept only to keep backward compatibility of invocation with"
        " former framework (gnatpython.testdriver)",
    )
    parser.add_argument(
        "--disable-cleanup",
        dest="enable_cleanup",
        action="store_false",
        default=True,
        help="disable cleanup of working space",
    )
    parser.add_argument(
        "-j",
        "--jobs",
        dest="jobs",
        type=int,
        metavar="N",
        default=Env().build.cpu.cores,
        help="Specify the number of jobs to run simultaneously",
    )
    parser.add_argument(
        "--show-error-output", "-E",
        action="store_true",
        help="When testcases fail, display their output. This is for"
        " convenience for interactive use.",
    )
    parser.add_argument(
        "--show-time-info",
        action="store_true",
        help="Display time information for test results, if available")
    parser.add_argument(
        "--dump-environ",
        dest="dump_environ",
        action="store_true",
        default=False,
        help="Dump all environment variables in a file named environ.sh,"
        " located in the output directory (see --output-dir). This"
        " file can then be sourced from a Bourne shell to recreate"
        " the environement that existed when this testsuite was run"
        " to produce a given testsuite report.",
    )
    parser.add_argument(
        "--xunit-output",
        dest="xunit_output",
        metavar="FILE",
        help="Output testsuite report to the given file in the standard"
        " XUnit XML format. This is useful to display results in"
        " continuous build systems such as Jenkins.",
    )
    parser.add_argument(
        "--gaia-output",
        action="store_true",
        help="Output a GAIA-compatible testsuite report next to the YAML"
        " report.")
    parser.add_argument(
        "--truncate-logs", "-T",
        metavar="N",
        type=int,
        default=200,
        help="When outputs (for instance subprocess outputs) exceed 2*N"
        " lines, only include the first and last N lines in logs. This is"
        " necessary when storage for testsuite results have size limits,"
        " and the useful information is generally either at the beginning"
        " or the end of such outputs. If 0, never truncate logs.")
    parser.add_argument(
        "--failure-exit-code",
        metavar="N",
        type=int,
        default=self.default_failure_exit_code,
        help="Exit code the testsuite must use when at least one test"
        " result shows a failure/error. By default, this is"
        f" {self.default_failure_exit_code}. This option is useful when"
        " running a testsuite in a continuous integration setup, as this"
        " can make the testing process stop when there is a regression.")
    parser.add_argument("sublist", metavar="tests", nargs="*", default=[],
                        help="test")

    # Add user defined options
    self.add_options(parser)

    # Parse options
    self.main.parse_args(args)
    assert self.main.args is not None

    # If there is a chance for the logging to end up in a non-tty stream,
    # disable colors. If not, be user-friendly and automatically show error
    # outputs.
    if (self.main.args.log_file
            or not isatty(sys.stdout)
            or not isatty(sys.stderr)):
        enable_colors = False
    else:  # interactive-only
        enable_colors = True
        self.main.args.show_error_output = True
    self.colors = ColorConfig(enable_colors)
    self.Fore = self.colors.Fore
    self.Style = self.colors.Style

    self.env = BaseEnv.from_env()
    self.env.enable_colors = enable_colors
    self.env.root_dir = self.root_dir
    self.env.test_dir = self.test_dir

    # At this stage compute commonly used paths. Keep the working dir as
    # short as possible, to avoid the risk of having a path that's too long
    # (a problem often seen on Windows, or when using WRS tools that have
    # their own max path limitations).
    #
    # Note that we do make sure that working_dir is an absolute path, as we
    # are likely to be changing directories when running each test. A
    # relative path would no longer work under those circumstances.
    d = os.path.abspath(self.main.args.output_dir)
    self.output_dir = os.path.join(d, "new")
    self.old_output_dir = os.path.join(d, "old")

    if self.main.args.dev_temp:
        # Use a temporary directory for developers: make sure it is an
        # empty directory and disable cleanup to ease post-mortem
        # investigation.
        self.working_dir = os.path.abspath(self.main.args.dev_temp)
        rm(self.working_dir, recursive=True)
        mkdir(self.working_dir)
        self.main.args.enable_cleanup = False
    else:
        # If the temp dir is supposed to be randomized, we need to create a
        # subdirectory, so check that the parent directory exists first.
        if not os.path.isdir(self.main.args.temp_dir):
            logger.critical("temp dir '%s' does not exist",
                            self.main.args.temp_dir)
            return 1
        self.working_dir = tempfile.mkdtemp(
            "", "tmp", os.path.abspath(self.main.args.temp_dir))

    # Create the new output directory that will hold the results and create
    # an index for it.
    self.setup_result_dir()
    self.report_index = ReportIndex(self.output_dir)

    # Store in global env: target information and common paths
    self.env.output_dir = self.output_dir
    self.env.working_dir = self.working_dir
    self.env.options = self.main.args

    # User specific startup
    self.set_up()

    # Retrieve the list of test
    self.has_error = False
    self.test_list = self.get_test_list(self.main.args.sublist)

    # Launch the mainloop
    self.total_test = len(self.test_list)
    self.run_test = 0

    self.scheduler = Scheduler(
        job_provider=self.job_factory,
        tokens=self.main.args.jobs,
        # correct_result expects specifically TestFragment instances (a Job
        # subclass), while Scheduler only guarantees Job instances.
        # Test drivers are supposed to register only TestFragment
        # instances, so the following cast should be fine.
        collect=cast(Any, self.collect_result),
    )
    actions = DAG()
    for parsed_test in self.test_list:
        if not self.add_test(actions, parsed_test):
            self.has_error = True
    actions.check()

    # Dump the scheduling DAG next to the results for debugging purposes.
    with open(os.path.join(self.output_dir, "tests.dot"), "w") as fd:
        fd.write(actions.as_dot())

    # Run the tests. Note that when the testsuite aborts because of too
    # many consecutive test failures, we still want to produce a report and
    # exit through regular ways, to catch KeyboardInterrupt exceptions,
    # which e3's scheduler uses to abort the execution loop, but only in
    # such cases. In other words, let the exception propagates if it's the
    # user that interrupted the testsuite.
    try:
        self.scheduler.run(actions)
    except KeyboardInterrupt:
        if not self.aborted_too_many_failures:  # interactive-only
            raise

    self.report_index.write()
    self.dump_testsuite_result()
    if self.main.args.xunit_output:
        dump_xunit_report(self, self.main.args.xunit_output)
    if self.main.args.gaia_output:
        dump_gaia_report(self, self.output_dir)

    # Clean everything
    self.tear_down()

    # Return the appropriate status code: 1 when there is a framework
    # issue, the failure status code from the --failure-exit-code=N option
    # when there is a least one testcase failure, or 0.
    statuses = {
        s for s, count in self.report_index.status_counters.items() if count
    }
    if self.has_error:
        return 1
    elif TestStatus.FAIL in statuses or TestStatus.ERROR in statuses:
        return self.main.args.failure_exit_code
    else:
        return 0