def prepare_v1_graph_run_v2( self, options: Options, options_bootstrapper: OptionsBootstrapper, ) -> Tuple[LegacyGraphSession, Specs, int]: """For v1 (and v2): computing Specs for a later v1 run. For v2: running an entire v2 run The exit_code in the return indicates whether any issue was encountered """ # If any nodes exist in the product graph, wait for the initial watchman event to avoid # racing watchman startup vs invalidation events. graph_len = self._scheduler.graph_len() if graph_len > 0: self._logger.debug( "graph len was {}, waiting for initial watchman event".format( graph_len)) self._watchman_is_running.wait() build_id = RunTracker.global_instance().run_id v2_ui = options.for_global_scope().get("v2_ui", False) zipkin_trace_v2 = options.for_scope("reporting").zipkin_trace_v2 session = self._graph_helper.new_session(zipkin_trace_v2, build_id, v2_ui) if options.for_global_scope().get("loop", False): fn = self._loop else: fn = self._body specs, exit_code = fn(session, options, options_bootstrapper) return session, specs, exit_code
def _init_graph_session(
    options_bootstrapper: OptionsBootstrapper,
    build_config: BuildConfiguration,
    options: Options,
) -> LegacyGraphSession:
    """Construct a fresh legacy graph session for this run.

    Installs the engine's panic handler, sets up the legacy graph scheduler, and opens a new
    session configured from global/reporting options.
    """
    native = Native()
    native.set_panic_handler()
    graph_scheduler_helper = EngineInitializer.setup_legacy_graph(
        native, options_bootstrapper, build_config)
    # `get()` because `v2_ui` may not be registered; defaults off.
    v2_ui = options.for_global_scope().get("v2_ui", False)
    zipkin_trace_v2 = options.for_scope("reporting").zipkin_trace_v2
    # TODO(#8658) This should_report_workunits flag must be set to True for
    # StreamingWorkunitHandler to receive WorkUnits. It should eventually
    # be merged with the zipkin_trace_v2 flag, since they both involve most
    # of the same engine functionality, but for now is separate to avoid
    # breaking functionality associated with zipkin tracing while iterating on streaming workunit reporting.
    stream_workunits = len(
        options.for_global_scope().streaming_workunits_handlers) != 0
    return graph_scheduler_helper.new_session(
        zipkin_trace_v2,
        RunTracker.global_instance().run_id,
        v2_ui,
        should_report_workunits=stream_workunits,
    )
def _init_graph_session(
    cls,
    options_bootstrapper: OptionsBootstrapper,
    build_config: BuildConfiguration,
    options: Options,
    scheduler: Optional[GraphScheduler] = None,
) -> GraphSession:
    """Construct a new graph session, reusing a warm scheduler when one is provided.

    :param scheduler: An optional pre-built scheduler (e.g. from pantsd); when None a new
        one is set up from the bootstrapper and build config.
    :raises UnknownFlagsError: Re-raised after being reported via `_handle_unknown_flags`.
    """
    native = Native()
    native.set_panic_handler()
    graph_scheduler_helper = scheduler or EngineInitializer.setup_graph(
        options_bootstrapper, build_config)
    try:
        global_scope = options.for_global_scope()
    except UnknownFlagsError as err:
        cls._handle_unknown_flags(err, options_bootstrapper)
        raise
    # Reuse the already-computed (and error-guarded) global scope rather than calling
    # options.for_global_scope() a second time, as the original did.
    stream_workunits = len(global_scope.streaming_workunits_handlers) != 0
    return graph_scheduler_helper.new_session(
        RunTracker.global_instance().run_id,
        dynamic_ui=global_scope.dynamic_ui,
        # `get()` because `colors` may not be registered; defaults on.
        use_colors=global_scope.get("colors", True),
        should_report_workunits=stream_workunits,
        # Session values are made available to rules by the engine.
        session_values=SessionValues({
            OptionsBootstrapper: options_bootstrapper,
            PantsEnvironment: PantsEnvironment(os.environ),
        }),
    )
def __init__(
    self,
    root_dir: str,
    options: Options,
    build_config: BuildConfiguration,
    run_tracker: RunTracker,
    reporting: Reporting,
    graph_session: LegacyGraphSession,
    target_roots: TargetRoots,
    exiter=sys.exit,
) -> None:
    """
    :param root_dir: The root directory of the pants workspace (aka the "build root").
    :param options: The global, pre-initialized Options instance.
    :param build_config: A pre-initialized BuildConfiguration instance.
    :param run_tracker: The global, pre-initialized/running RunTracker instance.
    :param reporting: The global, pre-initialized Reporting instance.
    :param graph_session: The graph session for this run.
    :param target_roots: A pre-existing `TargetRoots` object, if available.
    :param func exiter: A function that accepts an exit code value and exits. (for tests, Optional)
    """
    self._root_dir = root_dir
    self._options = options
    self._build_config = build_config
    self._run_tracker = run_tracker
    self._reporting = reporting
    self._graph_session = graph_session
    self._target_roots = target_roots
    self._exiter = exiter
    # Cache frequently-used global option values for the duration of the run.
    self._global_options = options.for_global_scope()
    self._fail_fast = self._global_options.fail_fast
    self._explain = self._global_options.explain
    self._kill_nailguns = self._global_options.kill_nailguns
def __init__(self, bootstrap_options: Options, daemon_entrypoint: str):
    """Initialize the pantsd process wrapper.

    :param bootstrap_options: The bootstrap Options instance; its global scope supplies the
        metadata base dir for process management.
    :param daemon_entrypoint: The module name used as the daemon's entry point.
    """
    metadata_dir = bootstrap_options.for_global_scope().pants_subprocessdir
    super().__init__(name="pantsd", metadata_base_dir=metadata_dir)
    self._bootstrap_options = bootstrap_options
    self._daemon_entrypoint = daemon_entrypoint
def _init_graph_session(
    cls,
    options_initializer: OptionsInitializer,
    options_bootstrapper: OptionsBootstrapper,
    build_config: BuildConfiguration,
    env: CompleteEnvironment,
    run_id: str,
    options: Options,
    scheduler: Optional[GraphScheduler] = None,
    cancellation_latch: Optional[PySessionCancellationLatch] = None,
) -> GraphSession:
    """Construct a new graph session, reusing a warm scheduler when one is provided.

    :param scheduler: An optional pre-built scheduler (e.g. from pantsd); when None a new
        one is set up from the bootstrapper, build config, and environment.
    """
    native = Native()
    native.set_panic_handler()
    graph_scheduler_helper = scheduler or EngineInitializer.setup_graph(
        options_bootstrapper, build_config, env
    )
    # NOTE(review): raise_=True presumably makes unknown flags fatal after being
    # reported by the handler — confirm against OptionsInitializer.
    with options_initializer.handle_unknown_flags(options_bootstrapper, env, raise_=True):
        global_options = options.for_global_scope()
    return graph_scheduler_helper.new_session(
        run_id,
        dynamic_ui=global_options.dynamic_ui,
        # `get()` because `colors` may not be registered; defaults on.
        use_colors=global_options.get("colors", True),
        # Session values are made available to rules by the engine.
        session_values=SessionValues(
            {
                OptionsBootstrapper: options_bootstrapper,
                CompleteEnvironment: env,
            }
        ),
        cancellation_latch=cancellation_latch,
    )
def __init__(
    self,
    native: Native,
    build_root: str,
    work_dir: str,
    log_level: LogLevel,
    services: PantsServices,
    metadata_base_dir: str,
    bootstrap_options: Options,
):
    """
    NB: A PantsDaemon instance is generally instantiated via `create`.

    :param native: A `Native` instance.
    :param build_root: The pants build root.
    :param work_dir: The pants work directory.
    :param log_level: The log level to use for daemon logging.
    :param services: A registry of services to use in this run.
    :param metadata_base_dir: The ProcessManager metadata base dir.
    :param bootstrap_options: The bootstrap options.
    """
    super().__init__(bootstrap_options, daemon_entrypoint=__name__)
    self._native = native
    self._build_root = build_root
    self._work_dir = work_dir
    self._log_level = log_level
    self._services = services
    self._bootstrap_options = bootstrap_options
    # Default to showing rust 3rdparty logs when no bootstrap options were provided.
    self._log_show_rust_3rdparty = (
        bootstrap_options.for_global_scope().log_show_rust_3rdparty
        if bootstrap_options else True)
    self._logger = logging.getLogger(__name__)
    # N.B. This Event is used as nothing more than a convenient atomic flag - nothing waits on it.
    self._kill_switch = threading.Event()
def _body(
    self,
    session: LegacyGraphSession,
    options: Options,
    options_bootstrapper: OptionsBootstrapper,
) -> Tuple[Specs, int]:
    """Compute Specs for this run and, if any v2 goals were requested, execute them once.

    :return: The computed specs and the exit code from the v2 goal run
        (PANTS_SUCCEEDED_EXIT_CODE when no v2 goals ran).
    """
    global_options = options.for_global_scope()
    specs = SpecsCalculator.create(
        options=options,
        session=session.scheduler_session,
        exclude_patterns=tuple(global_options.exclude_target_regexp)
        if global_options.exclude_target_regexp else tuple(),
        tags=tuple(global_options.tag) if global_options.tag else tuple(),
    )
    exit_code = PANTS_SUCCEEDED_EXIT_CODE
    v1_goals, ambiguous_goals, v2_goals = options.goals_by_version
    # Goals that exist in both v1 and v2 ("ambiguous") only run here when --v2 is enabled.
    if v2_goals or (ambiguous_goals and global_options.v2):
        goals = v2_goals + (ambiguous_goals if global_options.v2 else tuple())
        # N.B. @goal_rules run pre-fork in order to cache the products they request during execution.
        exit_code = session.run_goal_rules(
            options_bootstrapper=options_bootstrapper,
            union_membership=self._union_membership,
            options=options,
            goals=goals,
            specs=specs,
        )
    return specs, exit_code
def _init_graph_session(
    cls,
    options_initializer: OptionsInitializer,
    options_bootstrapper: OptionsBootstrapper,
    build_config: BuildConfiguration,
    env: CompleteEnvironment,
    run_id: str,
    options: Options,
    scheduler: Optional[GraphScheduler] = None,
    cancellation_latch: Optional[PySessionCancellationLatch] = None,
) -> GraphSession:
    """Construct a new graph session, building a scheduler only when one was not provided.

    :param scheduler: An optional pre-built scheduler (e.g. from pantsd); when None a new
        one is set up from bootstrap options plus dynamically-resolved remote options.
    """
    native_engine.maybe_set_panic_handler()
    if scheduler is None:
        # Remote execution/caching options can depend on the environment, so they are
        # resolved dynamically here before building the scheduler.
        dynamic_remote_options, _ = DynamicRemoteOptions.from_options(options, env)
        bootstrap_options = options.bootstrap_option_values()
        assert bootstrap_options is not None
        scheduler = EngineInitializer.setup_graph(
            bootstrap_options, build_config, dynamic_remote_options
        )
    # NOTE(review): raise_=True presumably makes unknown flags fatal after being
    # reported by the handler — confirm against OptionsInitializer.
    with options_initializer.handle_unknown_flags(options_bootstrapper, env, raise_=True):
        global_options = options.for_global_scope()
    return scheduler.new_session(
        run_id,
        dynamic_ui=global_options.dynamic_ui,
        # `get()` because `colors` may not be registered; defaults on.
        use_colors=global_options.get("colors", True),
        # Session values are made available to rules by the engine.
        session_values=SessionValues(
            {
                OptionsBootstrapper: options_bootstrapper,
                CompleteEnvironment: env,
            }
        ),
        cancellation_latch=cancellation_latch,
    )
def __init__(
    self,
    native: Native,
    work_dir: str,
    log_level: LogLevel,
    server: Any,
    core: PantsDaemonCore,
    metadata_base_dir: str,
    bootstrap_options: Options,
):
    """
    NB: A PantsDaemon instance is generally instantiated via `create`.

    :param native: A `Native` instance.
    :param work_dir: The pants work directory.
    :param log_level: The log level to use for daemon logging.
    :param server: A native PyNailgunServer instance (not currently a nameable type).
    :param core: A PantsDaemonCore.
    :param metadata_base_dir: The ProcessManager metadata base dir.
    :param bootstrap_options: The bootstrap options.
    """
    super().__init__(bootstrap_options, daemon_entrypoint=__name__)
    self._native = native
    self._build_root = get_buildroot()
    self._work_dir = work_dir
    self._log_level = log_level
    self._server = server
    self._core = core
    self._bootstrap_options = bootstrap_options
    # Default to showing rust 3rdparty logs when no bootstrap options were provided.
    self._log_show_rust_3rdparty = (
        bootstrap_options.for_global_scope().log_show_rust_3rdparty
        if bootstrap_options else True)
    self._logger = logging.getLogger(__name__)
def _init_graph_session(
    cls,
    options_bootstrapper: OptionsBootstrapper,
    build_config: BuildConfiguration,
    run_id: str,
    options: Options,
    scheduler: Optional[GraphScheduler] = None,
    cancellation_latch: Optional[PySessionCancellationLatch] = None,
) -> GraphSession:
    """Construct a new graph session, reusing a warm scheduler when one is provided.

    :raises UnknownFlagsError: Re-raised after being reported via `_handle_unknown_flags`.
    """
    native = Native()
    native.set_panic_handler()
    graph_scheduler_helper = scheduler or EngineInitializer.setup_graph(
        options_bootstrapper, build_config
    )
    try:
        global_scope = options.for_global_scope()
    except UnknownFlagsError as err:
        cls._handle_unknown_flags(err, options_bootstrapper)
        raise
    return graph_scheduler_helper.new_session(
        run_id,
        dynamic_ui=global_scope.dynamic_ui,
        # `get()` because `colors` may not be registered; defaults on.
        use_colors=global_scope.get("colors", True),
        # Session values are made available to rules by the engine.
        session_values=SessionValues(
            {
                OptionsBootstrapper: options_bootstrapper,
                PantsEnvironment: PantsEnvironment(os.environ),
            }
        ),
        cancellation_latch=cancellation_latch,
    )
def run_goal_rules(
    self,
    *,
    options_bootstrapper: OptionsBootstrapper,
    union_membership: UnionMembership,
    options: Options,
    goals: Iterable[str],
    specs: Specs,
):
    """Runs @goal_rules sequentially and interactively by requesting their implicit Goal
    products.

    For retryable failures, raises scheduler.ExecutionError.

    :returns: An exit code.
    """
    global_options = options.for_global_scope()
    console = Console(
        use_colors=global_options.colors,
        # The console only drives the dynamic UI when `v2_ui` is enabled.
        session=self.scheduler_session if global_options.get("v2_ui") else None,
    )
    workspace = Workspace(self.scheduler_session)
    interactive_runner = InteractiveRunner(self.scheduler_session)
    for goal in goals:
        goal_product = self.goal_map[goal]
        # NB: We no-op for goals that have no V2 implementation because no relevant backends are
        # registered. This allows us to safely set `--v1 --v2`, even if no V2 backends are registered.
        # Once V1 is removed, we might want to reconsider the behavior to instead warn or error when
        # trying to run something like `./pants run` without any backends registered.
        is_implemented = union_membership.has_members_for_all(
            goal_product.subsystem_cls.required_union_implementations)
        if not is_implemented:
            continue
        params = Params(
            specs.provided_specs,
            options_bootstrapper,
            console,
            workspace,
            interactive_runner,
        )
        logger.debug(
            f"requesting {goal_product} to satisfy execution of `{goal}` goal"
        )
        try:
            exit_code = self.scheduler_session.run_goal_rule(
                goal_product, params)
        finally:
            # Flush even on failure so partial output isn't lost.
            console.flush()
        # Stop at the first failing goal.
        if exit_code != PANTS_SUCCEEDED_EXIT_CODE:
            return exit_code
    return PANTS_SUCCEEDED_EXIT_CODE
def __init__(
    self,
    root_dir: str,
    options_bootstrapper: OptionsBootstrapper,
    options: Options,
    build_config: BuildConfiguration,
    run_tracker: RunTracker,
    reporting: Reporting,
    graph_session: LegacyGraphSession,
    specs: Specs,
    exiter=sys.exit,
) -> None:
    """
    :param root_dir: The root directory of the pants workspace (aka the "build root").
    :param options_bootstrapper: The OptionsBootstrapper instance for this run.
    :param options: The global, pre-initialized Options instance.
    :param build_config: A pre-initialized BuildConfiguration instance.
    :param run_tracker: The global, pre-initialized/running RunTracker instance.
    :param reporting: The global, pre-initialized Reporting instance.
    :param graph_session: The graph session for this run.
    :param specs: The specs for this run, i.e. either the address or filesystem specs.
    :param func exiter: A function that accepts an exit code value and exits. (for tests, Optional)
    """
    self._root_dir = root_dir
    self._options_bootstrapper = options_bootstrapper
    self._options = options
    self._build_config = build_config
    self._run_tracker = run_tracker
    self._reporting = reporting
    self._graph_session = graph_session
    self._specs = specs
    self._exiter = exiter
    # Cache frequently-used global option values for the duration of the run.
    self._global_options = options.for_global_scope()
    self._fail_fast = self._global_options.fail_fast
    self._explain = self._global_options.explain
    self._kill_nailguns = self._global_options.kill_nailguns
    # V1 tasks do not understand FilesystemSpecs, so we eagerly convert them into AddressSpecs.
    if self._specs.filesystem_specs.dependencies:
        # Ask the engine which addresses own the given files.
        (owned_addresses, ) = self._graph_session.scheduler_session.product_request(
            Addresses, [
                Params(self._specs.filesystem_specs, self._options_bootstrapper)
            ])
        # Preserve the original tag/exclude matchers while swapping in the owning addresses.
        updated_address_specs = AddressSpecs(
            dependencies=tuple(
                SingleAddress(a.spec_path, a.target_name)
                for a in owned_addresses),
            tags=self._specs.address_specs.matcher.tags,
            exclude_patterns=self._specs.address_specs.matcher.
            exclude_patterns,
        )
        self._specs = Specs(
            address_specs=updated_address_specs,
            filesystem_specs=FilesystemSpecs([]),
        )
def __init__(self, options: Options):
    """Collect all options-related help info for later printing.

    Only the options help matters here, so placeholder values are passed for the
    union membership, consumed-scopes mapper, and registered target types.
    """
    global_options = options.for_global_scope()
    super().__init__(global_options.colors)
    self._all_help_info = HelpInfoExtracter.get_all_help_info(
        options,
        UnionMembership({}),
        lambda _scope: tuple(),
        RegisteredTargetTypes({}),
    )
def __init__(self, options: Options):
    """Collect all options-related help info for later printing.

    :param options: The fully-parsed Options instance for this run.
    """
    # Compute the global scope once; the original called for_global_scope() twice
    # (for `colors` and again for `pants_bin_name`).
    global_options = options.for_global_scope()
    super().__init__(global_options.colors)
    self._bin_name = global_options.pants_bin_name
    self._all_help_info = HelpInfoExtracter.get_all_help_info(
        options,
        # We only care about the options-related help info, so we pass in
        # dummy values for union_membership and consumed_scopes_mapper.
        UnionMembership({}),
        lambda x: tuple(),
    )
def _init_graph_session(
    options_bootstrapper: OptionsBootstrapper,
    build_config: BuildConfiguration,
    options: Options,
    scheduler: Optional[LegacyGraphScheduler] = None,
) -> LegacyGraphSession:
    """Construct a new legacy graph session, reusing a warm scheduler when one is provided.

    :param scheduler: An optional pre-built scheduler (e.g. from pantsd); when None a new
        one is set up from the bootstrapper and build config.
    """
    native = Native()
    native.set_panic_handler()
    graph_scheduler_helper = scheduler or EngineInitializer.setup_legacy_graph(
        options_bootstrapper, build_config)
    global_scope = options.for_global_scope()
    # The dynamic UI only applies to v2 runs; `v2_ui` is the deprecated spelling of
    # `dynamic_ui`, so conflicts between the two are resolved here.
    if global_scope.v2:
        dynamic_ui = resolve_conflicting_options(
            old_option="v2_ui",
            new_option="dynamic_ui",
            old_scope=GLOBAL_SCOPE,
            new_scope=GLOBAL_SCOPE,
            old_container=global_scope,
            new_container=global_scope,
        )
    else:
        dynamic_ui = False
    use_colors = global_scope.get("colors", True)
    zipkin_trace_v2 = options.for_scope("reporting").zipkin_trace_v2
    # TODO(#8658) This should_report_workunits flag must be set to True for
    # StreamingWorkunitHandler to receive WorkUnits. It should eventually
    # be merged with the zipkin_trace_v2 flag, since they both involve most
    # of the same engine functionality, but for now is separate to avoid
    # breaking functionality associated with zipkin tracing while iterating on streaming workunit reporting.
    # Reuse the already-computed global scope rather than calling for_global_scope()
    # a second time, as the original did.
    stream_workunits = len(global_scope.streaming_workunits_handlers) != 0
    return graph_scheduler_helper.new_session(
        zipkin_trace_v2,
        RunTracker.global_instance().run_id,
        dynamic_ui=dynamic_ui,
        use_colors=use_colors,
        should_report_workunits=stream_workunits,
    )
def get_bootstrap_option_values(env=None, config=None, args=None, buildroot=None):
    """Get the values of just the bootstrap options."""
    # Collect the registered bootstrap flag names so that any other global-scope args
    # on the command line can be ignored instead of causing a parse failure.
    flags = set()

    def capture_the_flags(*args, **kwargs):
        flags.update(args)

    register_bootstrap_options(capture_the_flags, buildroot=buildroot)

    def is_bootstrap_flag(arg):
        # Match on the flag name, ignoring any `=value` suffix.
        return arg.partition('=')[0] in flags

    bargs = filter(is_bootstrap_flag, args)
    bootstrap_options = Options(env=env,
                                config=config,
                                known_scopes=[GLOBAL_SCOPE],
                                args=bargs)
    register_bootstrap_options(bootstrap_options.register_global, buildroot=buildroot)
    return bootstrap_options.for_global_scope()
def prepare_graph(self, options: Options) -> LegacyGraphSession:
    """Open a new graph session for this run, configured from global/reporting options."""
    # If the product graph already has nodes, wait for the initial watchman event so we
    # don't race watchman startup against invalidation events.
    num_nodes = self._scheduler.graph_len()
    if num_nodes > 0:
        self._logger.debug(f"graph len was {num_nodes}, waiting for initial watchman event")
        self._watchman_is_running.wait()
    global_options = options.for_global_scope()
    return self._graph_helper.new_session(
        options.for_scope("reporting").zipkin_trace_v2,
        RunTracker.global_instance().run_id,
        global_options.get("v2_ui", False),
    )
def _init_graph_session(
    options_bootstrapper: OptionsBootstrapper,
    build_config: BuildConfiguration,
    options: Options,
    scheduler: Optional[LegacyGraphScheduler] = None,
) -> LegacyGraphSession:
    """Construct a new legacy graph session, reusing a warm scheduler when one is provided.

    :param scheduler: An optional pre-built scheduler (e.g. from pantsd); when None a new
        one is set up from the bootstrapper and build config.
    """
    native = Native()
    native.set_panic_handler()
    graph_scheduler_helper = scheduler or EngineInitializer.setup_legacy_graph(
        options_bootstrapper, build_config)
    global_scope = options.for_global_scope()
    # The dynamic UI only applies to v2 runs.
    dynamic_ui = global_scope.dynamic_ui if global_scope.v2 else False
    use_colors = global_scope.get("colors", True)
    # Reuse the already-computed global scope rather than calling for_global_scope()
    # a second time, as the original did.
    stream_workunits = len(global_scope.streaming_workunits_handlers) != 0
    return graph_scheduler_helper.new_session(
        RunTracker.global_instance().run_id,
        dynamic_ui=dynamic_ui,
        use_colors=use_colors,
        should_report_workunits=stream_workunits,
    )
def _maybe_init_specs(
    specs: Optional[Specs],
    graph_session: LegacyGraphSession,
    options: Options,
    build_root: str,
) -> Specs:
    """Return the given specs when present; otherwise compute them from the options."""
    # A pre-computed Specs (e.g. passed in by the daemon) short-circuits the calculation.
    if specs:
        return specs
    global_opts = options.for_global_scope()
    return SpecsCalculator.create(
        options=options,
        build_root=build_root,
        session=graph_session.scheduler_session,
        exclude_patterns=tuple(global_opts.exclude_target_regexp),
        tags=tuple(global_opts.tag))
def run_goal_rules(
    self,
    options_bootstrapper: OptionsBootstrapper,
    options: Options,
    goals: Iterable[str],
    specs: Specs,
):
    """Runs @goal_rules sequentially and interactively by requesting their implicit Goal
    products.

    For retryable failures, raises scheduler.ExecutionError.

    :returns: An exit code.
    """
    global_options = options.for_global_scope()
    console = Console(
        use_colors=global_options.colors,
        # The console only drives the dynamic UI when `v2_ui` is enabled.
        session=self.scheduler_session if global_options.get('v2_ui') else None,
    )
    workspace = Workspace(self.scheduler_session)
    interactive_runner = InteractiveRunner(self.scheduler_session)
    for goal in goals:
        goal_product = self.goal_map[goal]
        params = Params(
            specs.provided_specs,
            options_bootstrapper,
            console,
            workspace,
            interactive_runner,
        )
        logger.debug(
            f'requesting {goal_product} to satisfy execution of `{goal}` goal'
        )
        try:
            exit_code = self.scheduler_session.run_goal_rule(
                goal_product, params)
        finally:
            # Flush even on failure so partial output isn't lost.
            console.flush()
        # Stop at the first failing goal.
        if exit_code != PANTS_SUCCEEDED_EXIT_CODE:
            return exit_code
    return PANTS_SUCCEEDED_EXIT_CODE
def run_goal_rules(
    self,
    options_bootstrapper: OptionsBootstrapper,
    options: Options,
    goals: Iterable[str],
    target_roots: TargetRoots,
):
    """Runs @goal_rules sequentially and interactively by requesting their implicit Goal
    products.

    For retryable failures, raises scheduler.ExecutionError.

    :param goals: The list of requested goal names as passed on the commandline.
    :param target_roots: The targets root of the request.
    :returns: An exit code.
    """
    global_options = options.for_global_scope()
    address_specs = target_roots.specs
    console = Console(
        use_colors=global_options.colors,
        # The console only drives the dynamic UI when `v2_ui` is enabled.
        session=self.scheduler_session if global_options.v2_ui else None,
    )
    workspace = Workspace(self.scheduler_session)
    interactive_runner = InteractiveRunner(self.scheduler_session)
    for goal in goals:
        goal_product = self.goal_map[goal]
        params = Params(address_specs, options_bootstrapper, console,
                        workspace, interactive_runner)
        logger.debug(
            f'requesting {goal_product} to satisfy execution of `{goal}` goal'
        )
        try:
            exit_code = self.scheduler_session.run_goal_rule(
                goal_product, params)
        finally:
            # Flush even on failure so partial output isn't lost.
            console.flush()
        # Stop at the first failing goal.
        if exit_code != PANTS_SUCCEEDED_EXIT_CODE:
            return exit_code
    return PANTS_SUCCEEDED_EXIT_CODE
def _init_graph_session(
    cls,
    options_initializer: OptionsInitializer,
    options_bootstrapper: OptionsBootstrapper,
    build_config: BuildConfiguration,
    env: CompleteEnvironment,
    run_id: str,
    options: Options,
    scheduler: GraphScheduler | None = None,
    cancellation_latch: PySessionCancellationLatch | None = None,
) -> GraphSession:
    """Construct a new graph session, building a scheduler only when one was not provided.

    :param scheduler: An optional pre-built scheduler (e.g. from pantsd); when None a new
        one is set up from bootstrap options plus dynamically-resolved remote options.
    """
    native_engine.maybe_set_panic_handler()
    if scheduler is None:
        # Remote execution/caching options can depend on the environment, so they are
        # resolved dynamically here before building the scheduler.
        dynamic_remote_options, _ = DynamicRemoteOptions.from_options(options, env)
        bootstrap_options = options.bootstrap_option_values()
        assert bootstrap_options is not None
        scheduler = EngineInitializer.setup_graph(
            bootstrap_options, build_config, dynamic_remote_options
        )
    # NOTE(review): raise_=True presumably makes unknown flags fatal after being
    # reported by the handler — confirm against OptionsInitializer.
    with options_initializer.handle_unknown_flags(options_bootstrapper, env, raise_=True):
        global_options = options.for_global_scope()
    return scheduler.new_session(
        run_id,
        dynamic_ui=global_options.dynamic_ui,
        ui_use_prodash=global_options.dynamic_ui_renderer
        == DynamicUIRenderer.experimental_prodash,
        # `get()` because `colors` may not be registered; defaults on.
        use_colors=global_options.get("colors", True),
        # Record workunits at the most verbose level requested by any of: the streaming
        # workunit level, the global log level, or any per-target log-level override.
        max_workunit_level=max(
            global_options.streaming_workunits_level,
            global_options.level,
            *(
                LogLevel[level.upper()]
                for level in global_options.log_levels_by_target.values()
            ),
        ),
        # Session values are made available to rules by the engine.
        session_values=SessionValues(
            {
                OptionsBootstrapper: options_bootstrapper,
                CompleteEnvironment: env,
            }
        ),
        cancellation_latch=cancellation_latch,
    )
def __init__(
    self,
    build_root: str,
    options: Options,
    options_bootstrapper: OptionsBootstrapper,
    build_config: BuildConfiguration,
    specs: Specs,
    graph_session: LegacyGraphSession,
    scheduler_session: SchedulerSession,
    union_membership: UnionMembership,
    is_daemon: bool,
    profile_path: Optional[str],
) -> None:
    """
    :param build_root: The build root for this run.
    :param options: The parsed options for this run.
    :param options_bootstrapper: The OptionsBootstrapper instance to use.
    :param build_config: The parsed build configuration for this run.
    :param specs: The specs for this run, i.e. either the address or filesystem specs.
    :param graph_session: A LegacyGraphSession instance for graph reuse.
    :param scheduler_session: The scheduler session backing this run.
    :param union_membership: The union membership for this run.
    :param is_daemon: Whether or not this run was launched with a daemon graph helper.
    :param profile_path: The profile path - if any (from from the `PANTS_PROFILE` env var).
    """
    self._build_root = build_root
    self._options = options
    self._options_bootstrapper = options_bootstrapper
    self._build_config = build_config
    self._specs = specs
    self._graph_session = graph_session
    self._scheduler_session = scheduler_session
    self._union_membership = union_membership
    self._is_daemon = is_daemon
    self._profile_path = profile_path
    # These are populated later, once the run actually starts.
    self._run_start_time = None
    self._run_tracker = None
    self._reporting = None
    self._repro = None
    self._global_options = options.for_global_scope()
def run(
    self,
    build_config: BuildConfiguration,
    graph_session: GraphSession,
    options: Options,
    specs: Specs,
    union_membership: UnionMembership,
) -> ExitCode:
    """Render the requested help and return the printer's exit code."""
    # Gather help info for every scope, goal, and registered target type.
    all_help_info = HelpInfoExtracter.get_all_help_info(
        options,
        union_membership,
        graph_session.goal_consumed_subsystem_scopes,
        RegisteredTargetTypes.create(build_config.target_types),
        build_config,
    )
    printer = HelpPrinter(
        help_request=self.create_help_request(options),
        all_help_info=all_help_info,
        color=options.for_global_scope().colors,
    )
    return printer.print_help()
def graph_run_v2(
    self,
    session: LegacyGraphSession,
    specs: Specs,
    options: Options,
    options_bootstrapper: OptionsBootstrapper,
) -> int:
    """Perform an entire v2 run.

    The exit_code in the return indicates whether any issue was encountered.
    """
    global_options = options.for_global_scope()
    perform_loop = global_options.get("loop", False)
    v2 = global_options.v2
    if not perform_loop:
        return self._body(session, options, options_bootstrapper, specs, v2)
    # TODO: See https://github.com/pantsbuild/pants/issues/6288 regarding Ctrl+C handling.
    iterations = global_options.loop_max
    exit_code = PANTS_SUCCEEDED_EXIT_CODE
    while iterations and not self._state.is_terminating:
        try:
            exit_code = self._body(session, options, options_bootstrapper,
                                   specs, v2)
        except session.scheduler_session.execution_error_type as e:
            # Log retryable Scheduler errors and keep looping.
            self._logger.warning(e)
        iterations -= 1
        # Wait (polling once per second, so termination is noticed promptly) until the
        # loop condition fires or we run out of iterations.
        while (
            iterations
            and not self._state.is_terminating
            and not self._loop_condition.wait(timeout=1)
        ):
            continue
    # The exit code of the most recent iteration is the run's exit code.
    return exit_code
def _loop(
    self,
    session: LegacyGraphSession,
    options: Options,
    options_bootstrapper: OptionsBootstrapper,
) -> Tuple[Specs, int]:
    """Run `_body` repeatedly for `--loop`, up to `loop_max` iterations.

    :return: The specs and exit code from the most recent iteration.
    """
    # TODO: See https://github.com/pantsbuild/pants/issues/6288 regarding Ctrl+C handling.
    iterations = options.for_global_scope().loop_max
    specs = None
    exit_code = PANTS_SUCCEEDED_EXIT_CODE
    while iterations and not self._state.is_terminating:
        try:
            specs, exit_code = self._body(session, options,
                                          options_bootstrapper)
        except session.scheduler_session.execution_error_type as e:
            # Render retryable exceptions raised by the Scheduler.
            print(e, file=sys.stderr)
        iterations -= 1
        # Wait (polling once per second, so termination is noticed promptly) until the
        # loop condition fires or we run out of iterations.
        while (iterations and not self._state.is_terminating
               and not self._loop_condition.wait(timeout=1)):
            continue
    # NOTE(review): if the loop body never ran (loop_max of 0, or immediate termination),
    # `specs` is still None despite the cast — confirm callers tolerate this.
    return cast(Specs, specs), exit_code
def create(
    cls,
    options: Options,
    session: SchedulerSession,
    build_root: Optional[str] = None,
    exclude_patterns: Optional[Iterable[str]] = None,
    tags: Optional[Iterable[str]] = None,
) -> TargetRoots:
    """Compute the TargetRoots for this run from exactly one of: literal target specs,
    `--changed-*` options, or `--owner-of` options.

    :raises InvalidSpecConstraint: If more than one selection method was provided, or if
        `--changed-*` was used without a recognized SCM.
    """
    # Determine the literal target roots.
    address_spec_roots = cls.parse_address_specs(
        target_specs=options.specs,
        build_root=build_root,
        exclude_patterns=exclude_patterns,
        tags=tags)
    # Determine `Changed` arguments directly from options to support pre-`Subsystem`
    # initialization paths.
    changed_options = options.for_scope('changed')
    changed_request = ChangedRequest.from_options(changed_options)
    # Determine the `--owner-of=` arguments provided from the global options
    owned_files = options.for_global_scope().owner_of
    logger.debug('address_spec_roots are: %s', address_spec_roots)
    logger.debug('changed_request is: %s', changed_request)
    logger.debug('owned_files are: %s', owned_files)
    # Count how many of the three mutually-exclusive selection methods were used.
    targets_specified = sum(
        1 for item in (changed_request.is_actionable(), owned_files,
                       address_spec_roots.dependencies) if item)
    if targets_specified > 1:
        # We've been provided more than one of: a change request, an owner request, or spec roots.
        raise InvalidSpecConstraint(
            'Multiple target selection methods provided. Please use only one of '
            '--changed-*, --owner-of, or target specs')
    if changed_request.is_actionable():
        scm = get_scm()
        if not scm:
            raise InvalidSpecConstraint(
                'The --changed-* options are not available without a recognized SCM (usually git).'
            )
        changed_files = cls.changed_files(
            scm,
            changes_since=changed_request.changes_since,
            diffspec=changed_request.diffspec)
        # We've been provided no spec roots (e.g. `./pants list`) AND a changed request. Compute
        # alternate target roots.
        request = OwnersRequest(
            sources=tuple(changed_files),
            include_dependees=changed_request.include_dependees,
        )
        changed_addresses, = session.product_request(
            BuildFileAddresses, [request])
        logger.debug('changed addresses: %s', changed_addresses)
        dependencies = tuple(
            SingleAddress(a.spec_path, a.target_name)
            for a in changed_addresses)
        return TargetRoots(
            AddressSpecs(dependencies=dependencies,
                         exclude_patterns=exclude_patterns,
                         tags=tags), )
    if owned_files:
        # We've been provided no spec roots (e.g. `./pants list`) AND a owner request. Compute
        # alternate target roots.
        request = OwnersRequest(
            sources=tuple(owned_files),
            include_dependees=IncludeDependeesOption.NONE,
        )
        owner_addresses, = session.product_request(BuildFileAddresses,
                                                   [request])
        logger.debug('owner addresses: %s', owner_addresses)
        dependencies = tuple(
            SingleAddress(a.spec_path, a.target_name)
            for a in owner_addresses)
        return TargetRoots(
            AddressSpecs(dependencies=dependencies,
                         exclude_patterns=exclude_patterns,
                         tags=tags), )
    # Default: the literal specs from the command line.
    return TargetRoots(address_spec_roots)
class OptionsBootstrapper(object):
    """An object that knows how to create options in two stages: bootstrap, and then full
    options.

    Bootstrap options (global-scope only) are parsed first so that values like config
    locations and rcfiles can influence how the full, all-scopes Options are then built.
    """

    def __init__(self, env=None, configpath=None, args=None, buildroot=None):
        # :param env: Environment mapping; defaults to a copy of os.environ.
        # :param configpath: Path to an initial config file, if any.
        # :param args: Command-line args; defaults to sys.argv.
        # :param buildroot: The build root; defaults to the detected buildroot.
        self._buildroot = buildroot or get_buildroot()
        self._env = env or os.environ.copy()
        Config.reset_default_bootstrap_option_values(buildroot=self._buildroot)
        self._pre_bootstrap_config = Config.load([configpath] if configpath else None)
        self._post_bootstrap_config = None  # Will be set later.
        self._args = args or sys.argv
        self._bootstrap_options = None  # We memoize the bootstrap options here.
        self._full_options = None  # We memoize the full options here.
        # So other startup code has config to work with. This will go away once we replace direct
        # config accesses with options, and plumb those through everywhere that needs them.
        Config.cache(self._pre_bootstrap_config)

    def get_bootstrap_options(self):
        """Returns an Options instance that only knows about the bootstrap options."""
        if not self._bootstrap_options:
            # Collect the registered bootstrap flag names (including inverse --no-* forms)
            # so other global-scope args on the command line can be ignored.
            flags = set()

            def capture_the_flags(*args, **kwargs):
                for flag in Parser.expand_flags(*args, **kwargs):
                    flags.add(flag.name)
                    if flag.inverse_name:
                        flags.add(flag.inverse_name)

            register_bootstrap_options(capture_the_flags, buildroot=self._buildroot)
            # Take just the bootstrap args, so we don't choke on other global-scope args on the cmd line.
            bargs = filter(lambda x: x.partition('=')[0] in flags, self._args or [])
            self._bootstrap_options = Options(env=self._env,
                                              config=self._pre_bootstrap_config,
                                              known_scopes=[GLOBAL_SCOPE],
                                              args=bargs)
            register_bootstrap_options(self._bootstrap_options.register_global,
                                       buildroot=self._buildroot)
            bootstrap_option_values = self._bootstrap_options.for_global_scope()
            Config.reset_default_bootstrap_option_values(values=bootstrap_option_values)
            # Now re-read the config, post-bootstrapping. Note the order: First whatever we bootstrapped
            # from (typically pants.ini), then config override, then rcfiles.
            configpaths = list(self._pre_bootstrap_config.sources())
            if bootstrap_option_values.config_override:
                configpaths.append(bootstrap_option_values.config_override)
            if bootstrap_option_values.pantsrc:
                rcfiles = [os.path.expanduser(rcfile)
                           for rcfile in bootstrap_option_values.pantsrc_files]
                existing_rcfiles = filter(os.path.exists, rcfiles)
                configpaths.extend(existing_rcfiles)
            self._post_bootstrap_config = Config.load(configpaths)
            Config.cache(self._post_bootstrap_config)
        return self._bootstrap_options

    def get_full_options(self, known_scopes):
        """Returns the memoized full Options instance covering all known scopes."""
        if not self._full_options:
            # Note: Don't inline this into the Options() call, as this populates
            # self._post_bootstrap_config, which is another argument to that call.
            bootstrap_options = self.get_bootstrap_options()
            self._full_options = Options(
                self._env,
                self._post_bootstrap_config,
                known_scopes,
                args=self._args,
                bootstrap_option_values=bootstrap_options.for_global_scope())
            # The bootstrap options need to be registered on the post-bootstrap Options instance, so it
            # won't choke on them on the command line, and also so we can access their values as regular
            # global-scope options, for convenience.
            register_bootstrap_options(self._full_options.register_global,
                                       buildroot=self._buildroot)
        return self._full_options
def calculate_specs(
    options_bootstrapper: OptionsBootstrapper,
    options: Options,
    session: SchedulerSession,
) -> Specs:
    """Determine the specs for a given Pants run."""
    global_opts = options.for_global_scope()
    unmatched_glob_behavior = global_opts.unmatched_cli_globs.to_glob_match_error_behavior()
    dir_args_are_target_literals = global_opts.use_deprecated_directory_cli_args_semantics
    if global_opts.is_default("use_deprecated_directory_cli_args_semantics"):
        warn_or_error(
            "2.14.0.dev0",
            "`use_deprecated_directory_cli_args_semantics` defaulting to True",
            softwrap(f"""
                Currently, a directory argument like `{bin_name()} test dir` is shorthand for the
                target `dir:dir`, i.e. the target that leaves off `name=`.

                In Pants 2.14, by default, a directory argument will instead match all
                targets/files in the directory.

                To opt into the new and more intuitive semantics early, set
                `use_deprecated_directory_cli_args_semantics = false` in the `[GLOBAL]` section in
                `pants.toml`. Otherwise, set to `true` to silence this warning.
                """),
        )

    specs = SpecsParser().parse_specs(
        options.specs,
        description_of_origin="CLI arguments",
        unmatched_glob_behavior=unmatched_glob_behavior,
        convert_dir_literal_to_address_literal=dir_args_are_target_literals,
    )

    changed_opts = ChangedOptions.from_options(options.for_scope("changed"))
    logger.debug("specs are: %s", specs)
    logger.debug("changed_options are: %s", changed_opts)

    # Explicit specs and change detection are mutually exclusive: reject the combination.
    if specs and changed_opts.provided:
        changed_name = "--changed-since" if changed_opts.since else "--changed-diffspec"
        specs_description = specs.arguments_provided_description()
        assert specs_description is not None
        raise InvalidSpecConstraint(
            f"You used `{changed_name}` at the same time as using {specs_description}. You can "
            f"only use `{changed_name}` or use normal arguments."
        )

    if not changed_opts.provided:
        return specs

    # Change detection requires a Git worktree; resolve both via the engine.
    (git_binary,) = session.product_request(GitBinary, [Params(GitBinaryRequest())])
    (maybe_git_worktree,) = session.product_request(
        MaybeGitWorktree, [Params(GitWorktreeRequest(), git_binary)]
    )
    if not maybe_git_worktree.git_worktree:
        raise InvalidSpecConstraint(
            "The `--changed-*` options are only available if Git is used for the repository."
        )

    changed_files = tuple(changed_opts.changed_files(maybe_git_worktree.git_worktree))
    file_literal_specs = tuple(FileLiteralSpec(f) for f in changed_files)

    changed_request = ChangedRequest(changed_files, changed_opts.dependees)
    (changed_addresses,) = session.product_request(
        ChangedAddresses, [Params(changed_request, options_bootstrapper)]
    )
    logger.debug("changed addresses: %s", changed_addresses)

    def to_address_literal(address) -> AddressLiteralSpec:
        # Re-parse each changed address so its components map onto the spec fields.
        parsed = AddressInput.parse(address.spec, description_of_origin="`--changed-since`")
        return AddressLiteralSpec(
            path_component=parsed.path_component,
            target_component=parsed.target_component,
            generated_component=parsed.generated_component,
            parameters=parsed.parameters,
        )

    address_literal_specs = tuple(
        to_address_literal(address) for address in cast(ChangedAddresses, changed_addresses)
    )

    return Specs(
        includes=RawSpecs(
            # We need both address_literals and file_literals to cover all our edge cases, including
            # target-aware vs. target-less goals, e.g. `list` vs `count-loc`.
            address_literals=address_literal_specs,
            file_literals=file_literal_specs,
            unmatched_glob_behavior=unmatched_glob_behavior,
            filter_by_global_options=True,
            from_change_detection=True,
            description_of_origin="`--changed-since`",
        ),
        ignores=RawSpecs(description_of_origin="`--changed-since`"),
    )
class OptionsBootstrapper(object):
  """An object that knows how to create options in two stages: bootstrap, and then full options."""

  def __init__(self, env=None, configpath=None, args=None, buildroot=None):
    # All params default to the real process environment so tests can inject synthetic
    # env/config/args without touching global state.
    self._buildroot = buildroot or get_buildroot()
    self._env = env or os.environ.copy()
    # Seed Config's defaults with the buildroot before any config file is parsed.
    Config.reset_default_bootstrap_option_values(buildroot=self._buildroot)
    self._pre_bootstrap_config = Config.load([configpath] if configpath else None)
    self._post_bootstrap_config = None  # Will be set later.
    self._args = args or sys.argv
    self._bootstrap_options = None  # We memoize the bootstrap options here.
    self._full_options = None  # We memoize the full options here.
    # So other startup code has config to work with. This will go away once we replace direct
    # config accesses with options, and plumb those through everywhere that needs them.
    Config.cache(self._pre_bootstrap_config)

  def get_bootstrap_options(self):
    """Returns an Options instance that only knows about the bootstrap options."""
    if not self._bootstrap_options:
      flags = set()
      short_flags = set()

      # Harvest the complete set of bootstrap flag names (including --no-* inverses and
      # two-char short flags) by running the registration function against a capturing
      # callback instead of a real registry.
      def capture_the_flags(*args, **kwargs):
        for flag in Parser.expand_flags(*args, **kwargs):
          flags.add(flag.name)
          if len(flag.name) == 2:
            short_flags.add(flag.name)
          if flag.inverse_name:
            flags.add(flag.inverse_name)
      register_bootstrap_options(capture_the_flags, buildroot=self._buildroot)

      def is_bootstrap_option(arg):
        # Long flags match on the exact name before any '=';
        # short flags may have their value glued on (e.g. '-ldebug'), so match by prefix.
        components = arg.split('=', 1)
        if components[0] in flags:
          return True
        for flag in short_flags:
          if arg.startswith(flag):
            return True
        return False

      # Take just the bootstrap args, so we don't choke on other global-scope args on the cmd line.
      # Stop before '--' since args after that are pass-through and may have duplicate names to our
      # bootstrap options.
      bargs = filter(is_bootstrap_option,
                     itertools.takewhile(lambda arg: arg != '--', self._args))

      self._bootstrap_options = Options(env=self._env, config=self._pre_bootstrap_config,
                                        known_scopes=[GLOBAL_SCOPE], args=bargs)
      register_bootstrap_options(self._bootstrap_options.register_global,
                                 buildroot=self._buildroot)

      bootstrap_option_values = self._bootstrap_options.for_global_scope()
      # Push the now-known bootstrap values back into Config so later config parsing sees them
      # as defaults.
      Config.reset_default_bootstrap_option_values(values=bootstrap_option_values)

      # Now re-read the config, post-bootstrapping. Note the order: First whatever we bootstrapped
      # from (typically pants.ini), then config override, then rcfiles.
      configpaths = list(self._pre_bootstrap_config.sources())
      if bootstrap_option_values.config_override:
        configpaths.append(bootstrap_option_values.config_override)
      if bootstrap_option_values.pantsrc:
        rcfiles = [os.path.expanduser(rcfile)
                   for rcfile in bootstrap_option_values.pantsrc_files]
        existing_rcfiles = filter(os.path.exists, rcfiles)  # Missing rcfiles are silently skipped.
        configpaths.extend(existing_rcfiles)
      self._post_bootstrap_config = Config.load(configpaths)
      Config.cache(self._post_bootstrap_config)

    return self._bootstrap_options

  def get_full_options(self, known_scopes):
    """Returns a memoized Options instance covering all known scopes, not just bootstrap."""
    if not self._full_options:
      # Note: Don't inline this into the Options() call, as this populates
      # self._post_bootstrap_config, which is another argument to that call.
      bootstrap_options = self.get_bootstrap_options()
      self._full_options = Options(self._env,
                                   self._post_bootstrap_config,
                                   known_scopes,
                                   args=self._args,
                                   bootstrap_option_values=bootstrap_options.for_global_scope())
      # The bootstrap options need to be registered on the post-bootstrap Options instance, so it
      # won't choke on them on the command line, and also so we can access their values as regular
      # global-scope options, for convenience.
      register_bootstrap_options(self._full_options.register_global, buildroot=self._buildroot)
    return self._full_options
class GoalRunner(Command):
  """Lists installed goals or else executes a named goal."""

  class IntermixedArgumentsError(GoalError):
    # Raised when goal names and target specs are interleaved ambiguously on the command line.
    pass

  __command__ = 'goal'
  output = None

  # TODO(John Sirois): revisit wholesale locking when we move py support into pants new
  @classmethod
  def serialized(cls):
    # Goal serialization is now handled in goal execution during group processing.
    # The goal command doesn't need to hold the serialization lock; individual goals will
    # acquire the lock if they need to be serialized.
    return False

  def __init__(self, *args, **kwargs):
    self.targets = []
    self.config = Config.from_cache()

    # '' is the global scope; each goal contributes its own scopes (enclosing scopes first).
    known_scopes = ['']
    for goal in Goal.all():
      # Note that enclosing scopes will appear before scopes they enclose.
      known_scopes.extend(filter(None, goal.known_scopes()))

    # Annoying but temporary hack to get the parser. We can't use self.parser because
    # that only gets set up in the superclass ctor, and we can't call that until we have
    # self.new_options set up because the superclass ctor calls our register_options().
    # Fortunately this will all go away once we're fully off the old "Command" mechanism.
    legacy_parser = args[2] if len(args) > 2 else kwargs['parser']
    self.new_options = Options(os.environ.copy(), self.config, known_scopes, args=sys.argv,
                               legacy_parser=legacy_parser)
    super(GoalRunner, self).__init__(*args, needs_old_options=False, **kwargs)

  def get_spec_excludes(self):
    """Returns absolute paths to exclude from spec expansion (default: the pants workdir)."""
    spec_excludes = self.config.getlist(Config.DEFAULT_SECTION, 'spec_excludes', default=None)
    if spec_excludes is None:
      return [self.config.getdefault('pants_workdir')]
    return [os.path.join(self.root_dir, spec_exclude) for spec_exclude in spec_excludes]

  @property
  def global_options(self):
    return self.new_options.for_global_scope()

  @contextmanager
  def check_errors(self, banner):
    """Yields an error-recording callback; on exit, reports all recorded errors under banner."""
    errors = {}

    def error(key, include_traceback=False):
      # Capture the currently-handled exception (if any) as the error text for this key.
      exc_type, exc_value, _ = sys.exc_info()
      msg = StringIO()
      if include_traceback:
        # inspect.trace()[-2]: the frame that raised, one below the error() call itself.
        frame = inspect.trace()[-2]
        filename = frame[1]
        lineno = frame[2]
        funcname = frame[3]
        code = ''.join(frame[4]) if frame[4] else None
        traceback.print_list([(filename, lineno, funcname, code)], file=msg)
      if exc_type:
        msg.write(''.join(traceback.format_exception_only(exc_type, exc_value)))
      errors[key] = msg.getvalue()
      sys.exc_clear()  # Python 2 only: clear the handled exception.

    yield error

    if errors:
      msg = StringIO()
      msg.write(banner)
      # Keys recorded with no exception text are listed bare; the rest get their details.
      invalid_keys = [key for key, exc in errors.items() if not exc]
      if invalid_keys:
        msg.write('\n  %s' % '\n  '.join(invalid_keys))
      for key, exc in errors.items():
        if exc:
          msg.write('\n  %s =>\n    %s' % (key, '\n      '.join(exc.splitlines())))
      # The help message for goal is extremely verbose, and will obscure the
      # actual error message, so we don't show it in this case.
      self.error(msg.getvalue(), show_help=False)

  def register_options(self):
    # Register global options plus every goal's own options on the new options system.
    register_global_options(self.new_options.register_global)
    for goal in Goal.all():
      goal.register_options(self.new_options)

  def setup_parser(self, parser, args):
    """Parses goals and target specs from args, injecting targets into the build graph."""
    if not args:
      args.append('help')

    logger = logging.getLogger(__name__)

    goals = self.new_options.goals
    specs = self.new_options.target_specs
    fail_fast = self.new_options.for_global_scope().fail_fast

    # Warn when an argument could be either a goal or a directory containing a BUILD file.
    for goal in goals:
      if BuildFile.from_cache(get_buildroot(), goal, must_exist=False).exists():
        logger.warning(" Command-line argument '{0}' is ambiguous and was assumed to be "
                       "a goal. If this is incorrect, disambiguate it with ./{0}.".format(goal))

    if self.new_options.is_help:
      self.new_options.print_help(goals=goals, legacy=True)
      sys.exit(0)

    self.requested_goals = goals

    with self.run_tracker.new_workunit(name='setup', labels=[WorkUnit.SETUP]):
      spec_parser = CmdLineSpecParser(self.root_dir, self.address_mapper,
                                      spec_excludes=self.get_spec_excludes())
      with self.run_tracker.new_workunit(name='parse', labels=[WorkUnit.SETUP]):
        # Expand each spec to addresses and pull their transitive closure into the build graph.
        for spec in specs:
          for address in spec_parser.parse_addresses(spec, fail_fast):
            self.build_graph.inject_address_closure(address)
            self.targets.append(self.build_graph.get_target(address))
    self.goals = [Goal.by_name(goal) for goal in goals]

    rcfiles = self.config.getdefault('rcfiles', type=list,
                                     default=['/etc/pantsrc', '~/.pants.rc'])
    if rcfiles:
      rcfile = RcFile(rcfiles, default_prepend=False, process_default=True)

      # Break down the goals specified on the command line to the full set that will be run so we
      # can apply default flags to inner goal nodes. Also break down goals by Task subclass and
      # register the task class hierarchy fully qualified names so we can apply defaults to
      # baseclasses.
      sections = OrderedSet()
      for goal in Engine.execution_order(self.goals):
        for task_name in goal.ordered_task_names():
          sections.add(task_name)
          task_type = goal.task_type_by_name(task_name)
          # Walk the MRO up to (but excluding) Task so rc defaults can target base classes.
          for clazz in task_type.mro():
            if clazz == Task:
              break
            sections.add('%s.%s' % (clazz.__module__, clazz.__name__))

      augmented_args = rcfile.apply_defaults(sections, args)
      if augmented_args != args:
        # TODO(John Sirois): Cleanup this currently important mutation of the passed in args
        # once the 2-layer of command -> goal is squashed into one.
        args[:] = augmented_args
        sys.stderr.write("(using pantsrc expansion: pants goal %s)\n" % ' '.join(augmented_args))

  def run(self, lock):
    """Executes the requested goals; returns a non-zero exit code on failure."""
    # TODO(John Sirois): Consider moving to straight python logging. The divide between the
    # context/work-unit logging and standard python logging doesn't buy us anything.

    # Enable standard python logging for code with no handle to a context/work-unit.
    if self.global_options.level:
      LogOptions.set_stderr_log_level((self.global_options.level or 'info').upper())

    logdir = self.global_options.logdir or self.config.get('goals', 'logdir', default=None)
    if logdir:
      safe_mkdir(logdir)
      LogOptions.set_log_dir(logdir)

      prev_log_level = None
      # If quiet, temporarily change stderr log level to kill init's output.
      if self.global_options.quiet:
        prev_log_level = LogOptions.loglevel_name(LogOptions.stderr_log_level())
        # loglevel_name can fail, so only change level if we were able to get the current one.
        if prev_log_level is not None:
          LogOptions.set_stderr_log_level(LogOptions._LOG_LEVEL_NONE_KEY)
      log.init('goals')
      if prev_log_level is not None:
        LogOptions.set_stderr_log_level(prev_log_level)
    else:
      log.init()

    # Update the reporting settings, now that we have flags etc.
    def is_quiet_task():
      for goal in self.goals:
        if goal.has_task_of_type(QuietTaskMixin):
          return True
      return False

    # Target specs are mapped to the patterns which match them, if any. This variable is a key for
    # specs which don't match any exclusion regexes. We know it won't already be in the list of
    # patterns, because the asterisks in its name make it an invalid regex.
    _UNMATCHED_KEY = '** unmatched **'

    def targets_by_pattern(targets, patterns):
      # Groups targets by the first exclusion pattern matching their address spec;
      # unmatched targets land under _UNMATCHED_KEY.
      mapping = defaultdict(list)
      for target in targets:
        matched_pattern = None
        for pattern in patterns:
          if re.search(pattern, target.address.spec) is not None:
            matched_pattern = pattern
            break
        if matched_pattern is None:
          mapping[_UNMATCHED_KEY].append(target)
        else:
          mapping[matched_pattern].append(target)
      return mapping

    is_explain = self.global_options.explain
    update_reporting(self.global_options, is_quiet_task() or is_explain, self.run_tracker)

    if self.global_options.exclude_target_regexp:
      excludes = self.global_options.exclude_target_regexp
      log.debug('excludes:\n  {excludes}'.format(excludes='\n  '.join(excludes)))
      by_pattern = targets_by_pattern(self.targets, excludes)
      # Keep only the targets that matched no exclusion pattern.
      self.targets = by_pattern[_UNMATCHED_KEY]
      # The rest of this if-statement is just for debug logging.
      log.debug('Targets after excludes: {targets}'.format(
          targets=', '.join(t.address.spec for t in self.targets)))
      excluded_count = sum(len(by_pattern[p]) for p in excludes)
      log.debug('Excluded {count} target{plural}.'.format(count=excluded_count,
          plural=('s' if excluded_count != 1 else '')))
      for pattern in excludes:
        log.debug('Targets excluded by pattern {pattern}\n  {targets}'.format(pattern=pattern,
            targets='\n  '.join(t.address.spec for t in by_pattern[pattern])))

    context = Context(
      config=self.config,
      new_options=self.new_options,
      run_tracker=self.run_tracker,
      target_roots=self.targets,
      requested_goals=self.requested_goals,
      build_graph=self.build_graph,
      build_file_parser=self.build_file_parser,
      address_mapper=self.address_mapper,
      spec_excludes=self.get_spec_excludes(),
      lock=lock)

    # Reject goals that have no tasks installed (typically typos).
    unknown = []
    for goal in self.goals:
      if not goal.ordered_task_names():
        unknown.append(goal)

    if unknown:
      context.log.error('Unknown goal(s): %s\n' % ' '.join(goal.name for goal in unknown))
      return 1

    engine = RoundEngine()
    return engine.execute(context, self.goals)

  def cleanup(self):
    # TODO: This is JVM-specific and really doesn't belong here.
    # TODO: Make this more selective? Only kill nailguns that affect state? E.g., checkstyle
    # may not need to be killed.
    NailgunTask.killall(log.info)
    sys.exit(1)