def _body(
    self, session: LegacyGraphSession, options: Options, options_bootstrapper: OptionsBootstrapper,
) -> Tuple[Specs, int]:
    """Calculate the specs for this run and execute any v2 goals.

    Returns the calculated specs together with the exit code from running v2 goal
    rules, or PANTS_SUCCEEDED_EXIT_CODE when no v2 goals are run.
    """
    scope_values = options.for_global_scope()
    exclude_regexps = scope_values.exclude_target_regexp
    tag_values = scope_values.tag
    specs = SpecsCalculator.create(
        options=options,
        session=session.scheduler_session,
        exclude_patterns=tuple(exclude_regexps) if exclude_regexps else tuple(),
        tags=tuple(tag_values) if tag_values else tuple(),
    )

    v1_goals, ambiguous_goals, v2_goals = options.goals_by_version
    should_run_v2 = bool(v2_goals) or bool(ambiguous_goals and scope_values.v2)
    if not should_run_v2:
        return specs, PANTS_SUCCEEDED_EXIT_CODE

    # Ambiguous goals are routed to the v2 engine only when --v2 is enabled.
    selected_goals = v2_goals + (ambiguous_goals if scope_values.v2 else tuple())
    # N.B. @goal_rules run pre-fork in order to cache the products they request during execution.
    exit_code = session.run_goal_rules(
        options_bootstrapper=options_bootstrapper,
        union_membership=self._union_membership,
        options=options,
        goals=selected_goals,
        specs=specs,
    )
    return specs, exit_code
def create(
    cls,
    env: Mapping[str, str],
    options_bootstrapper: OptionsBootstrapper,
    scheduler: Optional[LegacyGraphScheduler] = None,
) -> "LocalPantsRunner":
    """Creates a new LocalPantsRunner instance by parsing options.

    By the time this method runs, logging will already have been initialized in either
    PantsRunner or DaemonPantsRunner.

    :param env: The environment (e.g. os.environ) for this run.
    :param options_bootstrapper: The OptionsBootstrapper instance to reuse.
    :param scheduler: If being called from the daemon, a warmed scheduler to use.
    """
    build_root = get_buildroot()
    bootstrap_globals = options_bootstrapper.bootstrap_options.for_global_scope()
    options, build_config = LocalPantsRunner.parse_options(options_bootstrapper)

    # Option values are usually computed lazily on demand, but command line options
    # are eagerly computed for validation.
    for scope in options.scope_to_flags:
        options.for_scope(scope)

    # Verify configs.
    if bootstrap_globals.verify_config:
        options.verify_configs(options_bootstrapper.config)

    union_membership = UnionMembership(build_config.union_rules())

    # If we're running with the daemon, we'll be handed a warmed Scheduler, which we use
    # to initialize a session here.
    graph_session = cls._init_graph_session(options_bootstrapper, build_config, options, scheduler)

    global_options = options.for_global_scope()
    specs = SpecsCalculator.create(
        options=options,
        build_root=build_root,
        session=graph_session.scheduler_session,
        exclude_patterns=tuple(global_options.exclude_target_regexp),
        tags=tuple(global_options.tag),
    )

    return cls(
        build_root=build_root,
        options=options,
        options_bootstrapper=options_bootstrapper,
        build_config=build_config,
        specs=specs,
        graph_session=graph_session,
        union_membership=union_membership,
        profile_path=env.get("PANTS_PROFILE"),
        _run_tracker=RunTracker.global_instance(),
    )
def execute_rule(
    self,
    args: Optional[Iterable[str]] = None,
    global_args: Optional[Iterable[str]] = None,
    env: Optional[Dict[str, str]] = None,
    exit_code: int = 0,
    additional_params: Optional[Iterable[Any]] = None,
) -> str:
    """Executes the @goal_rule for this test class.

    :API: public

    Returns the text output of the task.
    """
    # Build an OptionsBootstrapper for these args/env, and a captured Console instance.
    options_bootstrapper = create_options_bootstrapper(
        args=(*(global_args or []), self.goal_cls.name, *(args or [])),
        env=env,
    )
    BuildConfigInitializer.get(options_bootstrapper)
    full_options = options_bootstrapper.get_full_options(
        [*GlobalOptions.known_scope_infos(), *self.goal_cls.subsystem_cls.known_scope_infos()]
    )

    stdout, stderr = StringIO(), StringIO()
    console = Console(stdout=stdout, stderr=stderr)

    # Run for the target specs parsed from the args.
    specs = SpecsCalculator.parse_specs(full_options.specs, self.build_root)
    scheduler = self.scheduler
    run_params = Params(
        specs.provided_specs,
        console,
        options_bootstrapper,
        Workspace(scheduler),
        *(additional_params or []),
    )
    actual_exit_code = self.scheduler.run_goal_rule(self.goal_cls, run_params)

    # Flush and capture console output.
    console.flush()
    stdout_val = stdout.getvalue()
    stderr_val = stderr.getvalue()
    assert exit_code == actual_exit_code, (
        f"Exited with {actual_exit_code} (expected {exit_code}):"
        f"\nstdout:\n{stdout_val}\nstderr:\n{stderr_val}"
    )
    return stdout_val
def test_resolve_sources_snapshot(self) -> None:
    """This tests that convert filesystem specs and/or address specs into a single snapshot.

    Some important edge cases:
    - When a filesystem spec refers to a file without any owning target, it should be included
      in the snapshot.
    - If a file is covered both by an address spec and by a filesystem spec, we should merge it
      so that the file only shows up once.
    """
    self.create_files("demo", ["f1.txt", "f2.txt"])
    self.add_to_build_file("demo", "target(sources=['*.txt'])")

    # Mix an address spec with filesystem specs, one of which overlaps with the target.
    mixed_specs = SpecsCalculator.parse_specs(["demo:demo", "demo/f1.txt", "demo/BUILD"])
    snapshot_result = self.request_single_product(
        SourcesSnapshot, Params(mixed_specs, create_options_bootstrapper())
    )

    expected_files = ("demo/BUILD", "demo/f1.txt", "demo/f2.txt")
    assert snapshot_result.snapshot.files == expected_files
def _maybe_init_specs(
    specs: Optional[Specs],
    graph_session: LegacyGraphSession,
    options: Options,
    build_root: str,
) -> Specs:
    """Return the given specs, or calculate them from the options when not provided.

    :param specs: Pre-computed specs (e.g. handed over by the daemon), or None to
        compute them here from the parsed options.
    :param graph_session: The graph session whose scheduler session drives spec calculation.
    :param options: The parsed options for this run.
    :param build_root: The build root directory for this run.
    """
    # Identity check, not truthiness: a caller-supplied Specs must be honored even
    # if it happens to be falsy (e.g. contains no specs); only None means "compute".
    if specs is not None:
        return specs
    global_options = options.for_global_scope()
    return SpecsCalculator.create(
        options=options,
        build_root=build_root,
        session=graph_session.scheduler_session,
        exclude_patterns=tuple(global_options.exclude_target_regexp),
        tags=tuple(global_options.tag),
    )
def test_resolve_addresses(self) -> None:
    """This tests that we correctly handle resolving from both address and filesystem specs."""
    self.create_file("fs_spec/f.txt")
    self.add_to_build_file("fs_spec", "target(sources=['f.txt'])")
    self.create_file("address_spec/f.txt")
    self.add_to_build_file("address_spec", "target(sources=['f.txt'])")
    no_interaction_specs = ["fs_spec/f.txt", "address_spec:address_spec"]

    # If a generated subtarget's original base target is included via an address spec,
    # we will still include the generated subtarget for consistency. When we expand Targets
    # into their base targets this redundancy is removed, but during Address expansion we
    # get literal matches.
    self.create_files("multiple_files", ["f1.txt", "f2.txt"])
    self.add_to_build_file("multiple_files", "target(sources=['*.txt'])")
    multiple_files_specs = ["multiple_files/f2.txt", "multiple_files:multiple_files"]

    parsed = SpecsCalculator.parse_specs([*no_interaction_specs, *multiple_files_specs])
    resolved = self.request_single_product(
        AddressesWithOrigins, Params(parsed, create_options_bootstrapper())
    )

    expected = {
        AddressWithOrigin(
            Address("fs_spec", relative_file_path="f.txt"),
            origin=FilesystemLiteralSpec("fs_spec/f.txt"),
        ),
        AddressWithOrigin(
            Address("address_spec"),
            origin=SingleAddress("address_spec", "address_spec"),
        ),
        AddressWithOrigin(
            Address("multiple_files"),
            origin=SingleAddress("multiple_files", "multiple_files"),
        ),
        AddressWithOrigin(
            Address("multiple_files", relative_file_path="f2.txt"),
            origin=FilesystemLiteralSpec(file="multiple_files/f2.txt"),
        ),
    }
    assert set(resolved) == expected
def run(self):
    """Run a single Pants invocation on behalf of a Pailgun (nailgun) client.

    Redirects stdio to the nailgun socket, proxies the client's environment, and
    dispatches v2 goals (and v1 goals when enabled) before exiting via the exiter.
    """
    # Ensure anything referencing sys.argv inherits the Pailgun'd args.
    sys.argv = self.args

    # Invoke a Pants run with stdio redirected and a proxied environment.
    # NOTE(review): three stacked context managers — stdio redirection yields a
    # finalizer that the overridden global exiter must run on exit.
    with self.nailgunned_stdio(
        self.maybe_shutdown_socket, self.env
    ) as finalizer, DaemonExiter.override_global_exiter(
        self.maybe_shutdown_socket, finalizer
    ), hermetic_environment_as(**self.env):
        exit_code = PANTS_SUCCEEDED_EXIT_CODE
        try:
            # Clean global state.
            clean_global_runtime_state(reset_subsystem=True)

            options_bootstrapper = OptionsBootstrapper.create(args=self.args, env=self.env)
            options, build_config = LocalPantsRunner.parse_options(options_bootstrapper)
            global_options = options.for_global_scope()

            # Warm the graph for this run via the daemon's scheduler service.
            session = self.scheduler_service.prepare_graph(options)

            specs = SpecsCalculator.create(
                options=options,
                session=session.scheduler_session,
                exclude_patterns=tuple(global_options.exclude_target_regexp),
                tags=tuple(global_options.tag) if global_options.tag else (),
            )

            if options.help_request:
                # Help output short-circuits goal execution entirely.
                help_printer = HelpPrinter(
                    options=options,
                    union_membership=UnionMembership(build_config.union_rules()),
                )
                exit_code = help_printer.print_help()
            else:
                exit_code = self.scheduler_service.graph_run_v2(
                    session, specs, options, options_bootstrapper
                )
                # self.scheduler_service.graph_run_v2 will already run v2 or ambiguous goals. We should
                # only enter this code path if v1 is set.
                if global_options.v1:
                    with ExceptionSink.exiter_as_until_exception(
                        lambda _: PantsRunFailCheckerExiter()
                    ):
                        runner = LocalPantsRunner.create(
                            self.env, options_bootstrapper, specs, session
                        )
                        # PANTSD_RUNTRACKER_CLIENT_START_TIME is set by the client so the
                        # run tracker can attribute total wall time; absent means "unknown".
                        env_start_time = self.env.pop(
                            "PANTSD_RUNTRACKER_CLIENT_START_TIME", None
                        )
                        start_time = float(env_start_time) if env_start_time else None
                        runner.set_start_time(start_time)
                        runner.run()
        except KeyboardInterrupt:
            self._exiter.exit_and_fail("Interrupted by user.\n")
        except _PantsRunFinishedWithFailureException as e:
            # A v1 run completed but failed; propagate its exit code to the client.
            ExceptionSink.log_exception(
                "Pants run failed with exception: {}; exiting".format(e)
            )
            self._exiter.exit(e.exit_code)
        except Exception as e:
            # TODO: We override sys.excepthook above when we call ExceptionSink.set_exiter(). That
            # excepthook catches `SignalHandledNonLocalExit`s from signal handlers, which isn't
            # happening here, so something is probably overriding the excepthook. By catching Exception
            # and calling this method, we emulate the normal, expected sys.excepthook override.
            ExceptionSink._log_unhandled_exception_and_exit(exc=e)
        else:
            # Success path: exit with whichever code the help printer or goal run produced.
            self._exiter.exit(exit_code)
def create(
    cls,
    env: Mapping[str, str],
    options_bootstrapper: OptionsBootstrapper,
    specs: Optional[Specs] = None,
    daemon_graph_session: Optional[LegacyGraphSession] = None,
) -> "LocalPantsRunner":
    """Creates a new LocalPantsRunner instance by parsing options.

    :param env: The environment (e.g. os.environ) for this run.
    :param options_bootstrapper: The OptionsBootstrapper instance to reuse.
    :param specs: The specs for this run, i.e. either the address or filesystem specs.
    :param daemon_graph_session: The graph helper for this session.
    """
    build_root = get_buildroot()
    global_options = options_bootstrapper.bootstrap_options.for_global_scope()

    # This works as expected due to the encapsulated_logger in DaemonPantsRunner and
    # we don't have to gate logging setup anymore.
    # --quiet forces ERROR-level logging regardless of the configured level.
    level = LogLevel.ERROR if getattr(global_options, "quiet", False) else global_options.level
    ignores = global_options.ignore_pants_warnings
    # Reset any handlers installed by a previous run before configuring fresh ones.
    clear_previous_loggers()
    setup_logging_to_stderr(level, warnings_filter_regexes=ignores)
    log_dir = global_options.logdir
    if log_dir:
        # File logging is additive to stderr logging, only when --logdir is set.
        setup_logging_to_file(level, log_dir=log_dir, warnings_filter_regexes=ignores)

    options, build_config = LocalPantsRunner.parse_options(options_bootstrapper)

    # Option values are usually computed lazily on demand,
    # but command line options are eagerly computed for validation.
    for scope in options.scope_to_flags.keys():
        options.for_scope(scope)

    # Verify configs.
    if global_options.verify_config:
        options.verify_configs(options_bootstrapper.config)

    union_membership = UnionMembership(build_config.union_rules())

    # If we're running with the daemon, we'll be handed a session from the
    # resident graph helper - otherwise initialize a new one here.
    graph_session = (
        daemon_graph_session
        if daemon_graph_session
        else cls._init_graph_session(options_bootstrapper, build_config, options)
    )

    if specs is None:
        # No pre-computed specs (non-daemon path): derive them from the full options.
        # Note: global_options is re-bound here from the fully parsed options, which
        # supersede the bootstrap-only global options read above.
        global_options = options.for_global_scope()
        specs = SpecsCalculator.create(
            options=options,
            build_root=build_root,
            session=graph_session.scheduler_session,
            exclude_patterns=tuple(global_options.exclude_target_regexp),
            tags=tuple(global_options.tag),
        )

    # Optional path for cProfile output, taken from the run's environment.
    profile_path = env.get("PANTS_PROFILE")

    return cls(
        build_root=build_root,
        options=options,
        options_bootstrapper=options_bootstrapper,
        build_config=build_config,
        specs=specs,
        graph_session=graph_session,
        union_membership=union_membership,
        is_daemon=daemon_graph_session is not None,
        profile_path=profile_path,
    )