def test_materialize_input_files(self) -> None:
    """An interactive process can execute a file materialized from its input digest."""
    script_bytes = b'#!/usr/bin/python\nprint("hello")'
    mock_binary = self.create_mock_binary(script_bytes)
    ipr = InteractiveProcessRequest(
        argv=("./program.py",),
        run_in_workspace=False,
        input_files=mock_binary.digest,
    )
    runner = InteractiveRunner(self.scheduler)
    outcome = runner.run_local_interactive_process(ipr)
    self.assertEqual(outcome.process_exit_code, 0)
def run_pytest(
    self,
    *,
    passthrough_args: Optional[str] = None,
    origin: Optional[OriginSpec] = None,
) -> TestResult:
    """Run pytest against `{source_root}:target` and return the TestResult.

    Also requests a TestDebugRequest for the same params, runs it interactively,
    and asserts the debug run's exit code agrees with the batch result's status.
    """
    args = [
        "--backend-packages2=pants.backend.python",
        # pin to lower versions so that we can run Python 2 tests
        "--pytest-version=pytest>=4.6.6,<4.7",
        "--pytest-pytest-plugins=['zipp==1.0.0']",
    ]
    if passthrough_args:
        args.append(f"--pytest-args='{passthrough_args}'")
    options_bootstrapper = create_options_bootstrapper(args=args)
    if origin is None:
        origin = SingleAddress(directory=self.source_root, name="target")
    # TODO: We must use the V1 target's `_sources_field.sources` field to set the TargetAdaptor's
    # sources attribute. The adaptor will not auto-populate this field. However, it will
    # auto-populate things like `dependencies` and this was not necessary before using
    # PythonTestsAdaptorWithOrigin. Why is this necessary in test code?
    v1_target = self.target(f"{self.source_root}:target")
    adaptor = PythonTestsAdaptor(
        address=v1_target.address.to_address(),
        sources=v1_target._sources_field.sources,
    )
    params = Params(
        PytestRunner(PythonTestsAdaptorWithOrigin(adaptor, origin)), options_bootstrapper
    )
    test_result = self.request_single_product(TestResult, params)
    # The interactive debug run should succeed exactly when the batch run did.
    debug_request = self.request_single_product(TestDebugRequest, params)
    debug_result = InteractiveRunner(self.scheduler).run_local_interactive_process(
        debug_request.ipr
    )
    if test_result.status == Status.SUCCESS:
        assert debug_result.process_exit_code == 0
    else:
        assert debug_result.process_exit_code != 0
    return test_result
async def run_tests(
    console: Console,
    options: TestOptions,
    runner: InteractiveRunner,
    addresses: BuildFileAddresses,
) -> Test:
    """Goal rule: run tests for every given address, or one test in the foreground with --debug."""
    if options.values.debug:
        # Debug mode resolves a single address and runs its test interactively.
        address = await Get[BuildFileAddress](BuildFileAddresses, addresses)
        addr_debug_request = await Get[AddressAndDebugRequest](Address, address.to_address())
        result = runner.run_local_interactive_process(addr_debug_request.request.ipr)
        return Test(result.process_exit_code)
    results = await MultiGet(
        Get[AddressAndTestResult](Address, addr.to_address()) for addr in addresses)
    did_any_fail = False
    # Addresses that produced no test result are excluded from output and the exit code.
    filtered_results = [(x.address, x.test_result) for x in results if x.test_result is not None]
    for address, test_result in filtered_results:
        if test_result.status == Status.FAILURE:
            did_any_fail = True
        if test_result.stdout:
            console.write_stdout(f"{address.reference()} stdout:\n{test_result.stdout}\n")
        if test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving the
            # two streams.
            console.write_stdout(f"{address.reference()} stderr:\n{test_result.stderr}\n")
    console.write_stdout("\n")
    # Summary: address left-padded to 80 columns, status right-aligned in 10.
    for address, test_result in filtered_results:
        console.print_stdout(f'{address.reference():80}.....{test_result.status.value:>10}')
    if did_any_fail:
        console.print_stderr(console.red('\nTests failed'))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE
    return Test(exit_code)
def single_target_test(self, result, expected_console_output, success=True, debug=False):
    """Drive the `run_tests` rule for one mocked target; check console output and exit code.

    `result` is returned from the mocked AddressAndTestResult Get; `success` chooses
    whether the mocked debug request's interactive process succeeds or fails.
    """
    console = MockConsole(use_colors=False)
    options = MockOptions(debug=debug)
    runner = InteractiveRunner(self.scheduler)
    addr = self.make_build_target_address("some/target")
    res = run_rule(
        run_tests,
        rule_args=[console, options, runner, BuildFileAddresses([addr])],
        mock_gets=[
            MockGet(
                product_type=AddressAndTestResult,
                subject_type=Address,
                mock=lambda _: AddressAndTestResult(addr, result),
            ),
            MockGet(
                product_type=AddressAndDebugRequest,
                subject_type=Address,
                mock=lambda _: AddressAndDebugRequest(
                    addr,
                    TestDebugRequest(
                        ipr=self.make_successful_ipr() if success else self.make_failure_ipr())),
            ),
            MockGet(
                product_type=BuildFileAddress,
                subject_type=BuildFileAddresses,
                mock=lambda bfas: bfas.dependencies[0],
            ),
        ],
    )
    assert console.stdout.getvalue() == expected_console_output
    # Exit code is 0 on success and 1 on failure.
    assert (0 if success else 1) == res.exit_code
def test_repl_ipython(self) -> None:
    """The repl goal accepts --shell=ipython for a python library target."""
    self.setup_python_library()
    global_args = ["--backend-packages2=pants.backend.python"]
    goal_args = ["--shell=ipython", "src/python/lib.py"]
    self.execute_rule(
        global_args=global_args,
        args=goal_args,
        additional_params=[InteractiveRunner(self.scheduler)],
    )
def run_console_rules(self, options_bootstrapper, goals, target_roots):
    """Runs @console_rules sequentially and interactively by requesting their implicit Goal products.

    For retryable failures, raises scheduler.ExecutionError.

    :param list goals: The list of requested goal names as passed on the commandline.
    :param TargetRoots target_roots: The targets root of the request.
    :returns: An exit code.
    """
    subject = target_roots.specs
    console = Console(
        use_colors=options_bootstrapper.bootstrap_options.for_global_scope().colors
    )
    workspace = Workspace(self.scheduler_session)
    interactive_runner = InteractiveRunner(self.scheduler_session)
    for goal in goals:
        goal_product = self.goal_map[goal]
        params = Params(subject, options_bootstrapper, console, workspace, interactive_runner)
        logger.debug(f'requesting {goal_product} to satisfy execution of `{goal}` goal')
        try:
            exit_code = self.scheduler_session.run_console_rule(goal_product, params)
        finally:
            # Flush buffered console output even if the rule raised.
            console.flush()
        # Stop at the first goal that does not exit successfully.
        if exit_code != PANTS_SUCCEEDED_EXIT_CODE:
            return exit_code
    return PANTS_SUCCEEDED_EXIT_CODE
def single_target_run(
    self,
    *,
    console: MockConsole,
    program_text: bytes,
    address_spec: str,
) -> Run:
    """Run the `run` goal rule against one mocked binary target and return its Run result."""
    workspace = Workspace(self.scheduler)
    interactive_runner = InteractiveRunner(self.scheduler)
    BuildRoot().path = self.build_root
    res = run_rule(
        run,
        rule_args=[
            console,
            workspace,
            interactive_runner,
            BuildRoot(),
            Addresses([Address.parse(address_spec)]),
            MockOptions(args=[]),
        ],
        mock_gets=[
            # The binary "built" for the address is a mock wrapping `program_text`.
            MockGet(
                product_type=CreatedBinary,
                subject_type=Address,
                mock=lambda _: self.create_mock_binary(program_text),
            ),
        ],
    )
    return cast(Run, res)
def run_pytest(self, *, passthrough_args: Optional[str] = None) -> TestResult:
    """Run pytest against `{source_root}:target` and return the TestResult.

    Also runs the matching TestDebugRequest interactively and asserts its exit
    code agrees with the batch result's status.
    """
    args = [
        "--backend-packages2=pants.backend.python",
        "--pytest-version=pytest>=4.6.6,<4.7",  # so that we can run Python 2 tests
    ]
    if passthrough_args:
        args.append(f"--pytest-args='{passthrough_args}'")
    options_bootstrapper = create_options_bootstrapper(args=args)
    target = PythonTestsAdaptor(address=BuildFileAddress(
        rel_path=f"{self.source_root}/BUILD", target_name="target"), )
    test_result = self.request_single_product(
        TestResult, Params(target, options_bootstrapper))
    debug_request = self.request_single_product(
        TestDebugRequest,
        Params(target, options_bootstrapper),
    )
    debug_result = InteractiveRunner(
        self.scheduler).run_local_interactive_process(debug_request.ipr)
    if test_result.status == Status.SUCCESS:
        assert debug_result.process_exit_code == 0
    else:
        assert debug_result.process_exit_code != 0
    return test_result
def run(console: Console, workspace: Workspace, runner: InteractiveRunner, bfa: BuildFileAddress) -> Run:
    """Goal rule: build the binary for `bfa`, materialize it to a temp dir, and run it interactively.

    Yields a Run holding the subprocess exit code, or -1 if launching it raised.
    """
    target = bfa.to_address()
    binary = yield Get(CreatedBinary, Address, target)
    with temporary_dir(cleanup=True) as tmpdir:
        dirs_to_materialize = (DirectoryToMaterialize(
            path=str(tmpdir), directory_digest=binary.digest), )
        workspace.materialize_directories(dirs_to_materialize)
        console.write_stdout(f"Running target: {target}\n")
        full_path = str(Path(tmpdir, binary.binary_name))
        run_request = InteractiveProcessRequest(
            argv=[full_path],
            run_in_workspace=True,
        )
        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
            if result.process_exit_code == 0:
                console.write_stdout(f"{target} ran successfully.\n")
            else:
                console.write_stderr(
                    f"{target} failed with code {result.process_exit_code}!\n")
        except Exception as e:
            # Best-effort: report the failure and surface a non-zero exit code.
            console.write_stderr(
                f"Exception when attempting to run {target} : {e}\n")
            exit_code = -1
    yield Run(exit_code)
def run_pytest(
    self,
    *,
    passthrough_args: Optional[str] = None,
    origin: Optional[OriginSpec] = None,
) -> TestResult:
    """Run pytest against `{source_root}:target` and return the TestResult.

    Also runs the matching TestDebugRequest interactively and asserts its exit
    code agrees with the batch result's status.
    """
    args = [
        "--backend-packages2=pants.backend.python",
        # pin to lower versions so that we can run Python 2 tests
        "--pytest-version=pytest>=4.6.6,<4.7",
        "--pytest-pytest-plugins=['zipp==1.0.0']",
    ]
    if passthrough_args:
        args.append(f"--pytest-args='{passthrough_args}'")
    options_bootstrapper = create_options_bootstrapper(args=args)
    address = Address(self.source_root, "target")
    if origin is None:
        origin = SingleAddress(directory=address.spec_path, name=address.target_name)
    tgt = PythonTests({}, address=address)
    params = Params(
        PythonTestConfiguration.create(TargetWithOrigin(tgt, origin)),
        options_bootstrapper)
    test_result = self.request_single_product(TestResult, params)
    debug_request = self.request_single_product(TestDebugRequest, params)
    debug_result = InteractiveRunner(
        self.scheduler).run_local_interactive_process(debug_request.ipr)
    if test_result.status == Status.SUCCESS:
        assert debug_result.process_exit_code == 0
    else:
        assert debug_result.process_exit_code != 0
    return test_result
def test_output_mixed(self):
    """One passing and one failing target: both stdouts and a summary are printed; exit code 1.

    Fix: use `assertEqual` instead of the deprecated `assertEquals` alias
    (removed in Python 3.12).
    """
    console = MockConsole(use_colors=False)
    options = MockOptions(debug=False)
    runner = InteractiveRunner(self.scheduler)
    target1 = self.make_build_target_address(
        "testprojects/tests/python/pants/passes")
    target2 = self.make_build_target_address(
        "testprojects/tests/python/pants/fails")

    def make_result(target):
        # Map each known target to a canned pass/fail result.
        if target == target1:
            tr = TestResult(status=Status.SUCCESS, stdout='I passed\n', stderr='')
        elif target == target2:
            tr = TestResult(status=Status.FAILURE, stdout='I failed\n', stderr='')
        else:
            raise Exception("Unrecognised target")
        return AddressAndTestResult(target, tr)

    def make_debug_request(target):
        request = TestDebugRequest(
            ipr=self.make_successful_ipr() if target == target1 else self.make_failure_ipr())
        return AddressAndDebugRequest(target, request)

    res = run_rule(
        run_tests,
        rule_args=[console, options, runner, (target1, target2)],
        mock_gets=[
            MockGet(product_type=AddressAndTestResult,
                    subject_type=Address,
                    mock=make_result),
            MockGet(product_type=AddressAndDebugRequest,
                    subject_type=Address,
                    mock=make_debug_request),
            MockGet(product_type=BuildFileAddress,
                    subject_type=BuildFileAddresses,
                    mock=lambda tgt: BuildFileAddress(
                        rel_path=f'{tgt.spec_path}/BUILD',
                        target_name=tgt.target_name,
                    )),
        ],
    )
    self.assertEqual(1, res.exit_code)
    # Summary lines pad the address to 80 columns and right-align the status in 10.
    self.assertEqual(
        console.stdout.getvalue(),
        dedent("""\
            testprojects/tests/python/pants/passes stdout:
            I passed

            testprojects/tests/python/pants/fails stdout:
            I failed


            testprojects/tests/python/pants/passes                                          .....   SUCCESS
            testprojects/tests/python/pants/fails                                           .....   FAILURE
            """))
def single_target_run(
    self,
    *,
    console: MockConsole,
    program_text: bytes,
    address_spec: str,
) -> Run:
    """Run the `run` goal rule against one mocked binary target and return its Run result."""
    workspace = Workspace(self.scheduler)
    interactive_runner = InteractiveRunner(self.scheduler)
    address = Address.parse(address_spec)
    # Synthesize a BuildFileAddress for the parsed spec; no BUILD file actually exists.
    bfa = BuildFileAddress(build_file=None,
                           target_name=address.target_name,
                           rel_path=f'{address.spec_path}/BUILD')
    BuildRoot().path = self.build_root
    res = run_rule(
        run,
        rule_args=[
            console, workspace, interactive_runner, BuildRoot(), bfa,
            MockOptions(args=[])
        ],
        mock_gets=[
            # The binary "built" for the address is a mock wrapping `program_text`.
            MockGet(product_type=CreatedBinary,
                    subject_type=Address,
                    mock=lambda _: self.create_mock_binary(program_text)),
        ],
    )
    return cast(Run, res)
def test_repl_ipython(self) -> None:
    """Running the repl goal with --shell=ipython reports a clean exit."""
    self.setup_python_library()
    captured = self.execute_rule(
        global_args=["--backend-packages2=pants.backend.python"],
        args=["--shell=ipython", "src/python:some_lib"],
        additional_params=[InteractiveRunner(self.scheduler)],
    )
    expected = "REPL exited successfully."
    assert captured == expected
async def run_repl(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    options: ReplOptions,
    transitive_targets: TransitiveTargets,
    build_root: BuildRoot,
    union_membership: UnionMembership,
    global_options: GlobalOptions,
) -> Repl:
    """Goal rule: resolve the requested REPL implementation, materialize its binary, and run it."""
    # We can guarantee that we will only even enter this `goal_rule` if there exists an implementer
    # of the `ReplImplementation` union because `LegacyGraphSession.run_goal_rules()` will not
    # execute this rule's body if there are no implementations registered.
    membership: Iterable[Type[
        ReplImplementation]] = union_membership.union_rules[ReplImplementation]
    implementations = {impl.name: impl for impl in membership}
    default_repl = "python"
    repl_shell_name = cast(str, options.values.shell or default_repl)
    repl_implementation_cls = implementations.get(repl_shell_name)
    if repl_implementation_cls is None:
        available = sorted(set(implementations.keys()))
        console.write_stdout(
            f"{repl_shell_name} is not an installed REPL program. Available REPLs: {available}"
        )
        return Repl(-1)
    # Only hand the implementation the closure targets it declares it can handle.
    repl_impl = repl_implementation_cls(targets=Targets(
        tgt for tgt in transitive_targets.closure
        if repl_implementation_cls.is_valid(tgt)))
    repl_binary = await Get[ReplBinary](ReplImplementation, repl_impl)
    # cleanup=False: the interactive REPL process runs from this directory.
    with temporary_dir(root_dir=global_options.options.pants_workdir, cleanup=False) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(
            build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(repl_binary.digest,
                                   path_prefix=path_relative_to_build_root))
        full_path = PurePath(tmpdir, repl_binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path, ),
            run_in_workspace=True,
        )
        result = runner.run_local_interactive_process(run_request)
    exit_code = result.process_exit_code
    if exit_code == 0:
        console.write_stdout("REPL exited successfully.")
    else:
        console.write_stdout(f"REPL exited with error: {exit_code}.")
    return Repl(exit_code)
def run_test_rule(
    self,
    *,
    test_runner: Type[TestRunner],
    targets: List[HydratedTargetWithOrigin],
    debug: bool = False,
) -> Tuple[int, str]:
    """Drive the `run_tests` rule with mocked Gets; return (exit_code, captured stdout)."""
    console = MockConsole(use_colors=False)
    options = MockOptions(debug=debug, run_coverage=False)
    interactive_runner = InteractiveRunner(self.scheduler)
    workspace = Workspace(self.scheduler)
    union_membership = UnionMembership(
        {TestRunner: OrderedSet([test_runner])})

    def mock_coordinator_of_tests(
        wrapped_test_runner: WrappedTestRunner,
    ) -> AddressAndTestResult:
        # Echo back the canned result carried on the mock runner.
        runner = wrapped_test_runner.runner
        return AddressAndTestResult(
            address=runner.adaptor_with_origin.adaptor.address,
            test_result=runner.test_result,  # type: ignore[attr-defined]
        )

    result: Test = run_rule(
        run_tests,
        rule_args=[
            console,
            options,
            interactive_runner,
            HydratedTargetsWithOrigins(targets),
            workspace,
            union_membership,
        ],
        mock_gets=[
            MockGet(
                product_type=AddressAndTestResult,
                subject_type=WrappedTestRunner,
                mock=lambda wrapped_test_runner: mock_coordinator_of_tests(
                    wrapped_test_runner),
            ),
            MockGet(
                product_type=TestDebugRequest,
                subject_type=TestRunner,
                mock=lambda _: TestDebugRequest(self.make_ipr()),
            ),
            MockGet(
                product_type=CoverageReport,
                subject_type=CoverageDataBatch,
                mock=lambda _: FilesystemCoverageReport(
                    result_digest=EMPTY_DIRECTORY_DIGEST,
                    directory_to_materialize_to=PurePath("mockety/mock"),
                    report_file=None,
                ),
            ),
        ],
        union_membership=union_membership,
    )
    return result.exit_code, console.stdout.getvalue()
def run_goal_rules(
    self,
    *,
    options_bootstrapper: OptionsBootstrapper,
    union_membership: UnionMembership,
    options: Options,
    goals: Iterable[str],
    specs: Specs,
):
    """Runs @goal_rules sequentially and interactively by requesting their implicit Goal products.

    For retryable failures, raises scheduler.ExecutionError.

    :returns: An exit code.
    """
    global_options = options.for_global_scope()
    console = Console(
        use_colors=global_options.colors,
        session=self.scheduler_session if global_options.get("v2_ui") else None,
    )
    workspace = Workspace(self.scheduler_session)
    interactive_runner = InteractiveRunner(self.scheduler_session)
    for goal in goals:
        goal_product = self.goal_map[goal]
        # NB: We no-op for goals that have no V2 implementation because no relevant backends are
        # registered. This allows us to safely set `--v1 --v2`, even if no V2 backends are registered.
        # Once V1 is removed, we might want to reconsider the behavior to instead warn or error when
        # trying to run something like `./pants run` without any backends registered.
        is_implemented = union_membership.has_members_for_all(
            goal_product.subsystem_cls.required_union_implementations)
        if not is_implemented:
            continue
        params = Params(
            specs.provided_specs,
            options_bootstrapper,
            console,
            workspace,
            interactive_runner,
        )
        logger.debug(
            f"requesting {goal_product} to satisfy execution of `{goal}` goal"
        )
        try:
            exit_code = self.scheduler_session.run_goal_rule(
                goal_product, params)
        finally:
            # Flush buffered console output even if the rule raised.
            console.flush()
        # Stop at the first goal that does not exit successfully.
        if exit_code != PANTS_SUCCEEDED_EXIT_CODE:
            return exit_code
    return PANTS_SUCCEEDED_EXIT_CODE
def test_repl_bogus_repl_name(self) -> None:
    """An unknown --shell value fails with an informative stderr message."""
    self.setup_python_library()
    outcome = self.execute_rule(
        global_args=["--backend-packages2=pants.backend.python"],
        args=["--shell=bogus-repl", "src/python/lib.py"],
        additional_params=[InteractiveRunner(self.scheduler)],
        exit_code=-1,
    )
    expected_fragment = "'bogus-repl' is not a registered REPL. Available REPLs"
    assert expected_fragment in outcome.stderr
def test_repl_bogus_repl_name(self) -> None:
    """An unknown --shell value fails and lists the installed REPLs."""
    self.setup_python_library()
    captured = self.execute_rule(
        global_args=["--backend-packages2=pants.backend.python"],
        args=["--shell=bogus-repl", "src/python:some_lib"],
        additional_params=[InteractiveRunner(self.scheduler)],
        exit_code=-1,
    )
    needle = "bogus-repl is not an installed REPL program. Available REPLs:"
    assert needle in captured
def test_repl_with_targets(self) -> None:
    """The default REPL runs against a python library target."""
    self.setup_python_library()
    params = [InteractiveRunner(self.scheduler)]
    self.execute_rule(
        global_args=[
            "--backend-packages2=pants.backend.python",
            "--source-root-patterns=src/python",
        ],
        args=["src/python/lib.py"],
        additional_params=params,
    )
def single_target_run(
    self,
    *,
    console: MockConsole,
    program_text: bytes,
    address_spec: str,
) -> Run:
    """Run the `run` goal rule against one stub binary target and return its Run result."""
    workspace = Workspace(self.scheduler)
    interactive_runner = InteractiveRunner(self.scheduler)

    # Minimal stand-ins for a registered binary target type and its configuration.
    class TestBinaryConfiguration(BinaryConfiguration):
        required_fields = ()

    class TestBinaryTarget(Target):
        alias = "binary"
        core_fields = ()

    address = Address.parse(address_spec)
    origin = SingleAddress(address.spec_path, address.target_name)
    res = run_rule(
        run,
        rule_args=[
            console,
            workspace,
            interactive_runner,
            BuildRoot(),
            TargetsWithOrigins([
                TargetWithOrigin(
                    target=TestBinaryTarget(unhydrated_values={}, address=address),
                    origin=origin,
                )
            ]),
            create_goal_subsystem(RunOptions, args=[]),
            create_subsystem(GlobalOptions, pants_workdir=self.pants_workdir),
            UnionMembership(union_rules={
                BinaryConfiguration: OrderedSet([TestBinaryConfiguration])
            }),
            RegisteredTargetTypes.create([TestBinaryTarget]),
        ],
        mock_gets=[
            # The binary "built" for the configuration is a mock wrapping `program_text`.
            MockGet(
                product_type=CreatedBinary,
                subject_type=TestBinaryConfiguration,
                mock=lambda _: self.create_mock_binary(program_text),
            ),
        ],
    )
    return cast(Run, res)
def single_target_run(
    self,
    *,
    console: MockConsole,
    program_text: bytes,
    address_spec: str,
) -> Run:
    """Run the `run` goal rule against one stub binary target and return its Run result."""
    workspace = Workspace(self.scheduler)
    interactive_runner = InteractiveRunner(self.scheduler)

    # Minimal stand-ins for a registered binary target type and its field set.
    class TestBinaryFieldSet(BinaryFieldSet):
        required_fields = ()

    class TestBinaryTarget(Target):
        alias = "binary"
        core_fields = ()

    address = Address.parse(address_spec)
    target = TestBinaryTarget({}, address=address)
    target_with_origin = TargetWithOrigin(
        target, SingleAddress(address.spec_path, address.target_name))
    field_set = TestBinaryFieldSet.create(target)
    res = run_rule(
        run,
        rule_args=[
            create_goal_subsystem(RunOptions, args=[]),
            create_subsystem(GlobalOptions, pants_workdir=self.pants_workdir),
            console,
            interactive_runner,
            workspace,
            BuildRoot(),
        ],
        mock_gets=[
            MockGet(
                product_type=TargetsToValidFieldSets,
                subject_type=TargetsToValidFieldSetsRequest,
                mock=lambda _: TargetsToValidFieldSets(
                    {target_with_origin: [field_set]}),
            ),
            # The binary "built" for the field set is a mock wrapping `program_text`.
            MockGet(
                product_type=CreatedBinary,
                subject_type=TestBinaryFieldSet,
                mock=lambda _: self.create_mock_binary(program_text),
            ),
        ],
    )
    return cast(Run, res)
async def run(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    build_root: BuildRoot,
    options: RunOptions,
    global_options: GlobalOptions,
) -> Run:
    """Goal rule: build the single valid binary config, materialize it under the workdir, run it."""
    targets_to_valid_configs = await Get[TargetsToValidConfigurations](
        TargetsToValidConfigurationsRequest(
            BinaryConfiguration,
            goal_description=f"the `{options.name}` goal",
            error_if_no_valid_targets=True,
            expect_single_config=True,
        ))
    config = targets_to_valid_configs.configurations[0]
    binary = await Get[CreatedBinary](BinaryConfiguration, config)
    workdir = global_options.options.pants_workdir
    # Temp dir lives under the pants workdir and is removed after the run.
    with temporary_dir(root_dir=workdir, cleanup=True) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(
            build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(binary.digest,
                                   path_prefix=path_relative_to_build_root))
        console.write_stdout(f"Running target: {config.address}\n")
        full_path = PurePath(tmpdir, binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path, *options.values.args),
            run_in_workspace=True,
        )
        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
            if result.process_exit_code == 0:
                console.write_stdout(f"{config.address} ran successfully.\n")
            else:
                console.write_stderr(
                    f"{config.address} failed with code {result.process_exit_code}!\n"
                )
        except Exception as e:
            # Best-effort: report the failure and surface a non-zero exit code.
            console.write_stderr(
                f"Exception when attempting to run {config.address}: {e!r}\n")
            exit_code = -1
    return Run(exit_code)
def run(console: Console, runner: InteractiveRunner, build_file_addresses: BuildFileAddresses) -> Run:
    """Goal rule: launch /usr/bin/python interactively and surface its exit code (-1 on error)."""
    console.write_stdout("Running the `run` goal\n")
    request = InteractiveProcessRequest(
        argv=["/usr/bin/python"],
        # NOTE(review): env is a flat ("KEY", "value") tuple — confirm this matches the
        # expected shape of InteractiveProcessRequest.env (pairs vs mapping).
        env=("TEST_ENV", "TEST"),
        run_in_workspace=False,
    )
    try:
        res = runner.run_local_interactive_process(request)
        print(f"Subprocess exited with result: {res.process_exit_code}")
        yield Run(res.process_exit_code)
    except Exception as e:
        print(f"Exception when running local interactive process: {e}")
        yield Run(-1)
def run_goal_rules(
    self,
    options_bootstrapper: OptionsBootstrapper,
    options: Options,
    goals: Iterable[str],
    specs: Specs,
):
    """Runs @goal_rules sequentially and interactively by requesting their implicit Goal products.

    For retryable failures, raises scheduler.ExecutionError.

    :returns: An exit code.
    """
    global_options = options.for_global_scope()
    console = Console(
        use_colors=global_options.colors,
        session=self.scheduler_session if global_options.get('v2_ui') else None,
    )
    workspace = Workspace(self.scheduler_session)
    interactive_runner = InteractiveRunner(self.scheduler_session)
    for goal in goals:
        goal_product = self.goal_map[goal]
        params = Params(
            specs.provided_specs,
            options_bootstrapper,
            console,
            workspace,
            interactive_runner,
        )
        logger.debug(
            f'requesting {goal_product} to satisfy execution of `{goal}` goal'
        )
        try:
            exit_code = self.scheduler_session.run_goal_rule(
                goal_product, params)
        finally:
            # Flush buffered console output even if the rule raised.
            console.flush()
        # Stop at the first goal that does not exit successfully.
        if exit_code != PANTS_SUCCEEDED_EXIT_CODE:
            return exit_code
    return PANTS_SUCCEEDED_EXIT_CODE
async def run_repl(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    options: ReplOptions,
    transitive_targets: TransitiveTargets,
    build_root: BuildRoot,
    union_membership: UnionMembership,
    global_options: GlobalOptions,
) -> Repl:
    """Goal rule: resolve the requested REPL implementation, materialize its binary, and run it."""
    default_repl = "python"
    repl_shell_name = cast(str, options.values.shell) or default_repl
    implementations: Dict[str, Type[ReplImplementation]] = {
        impl.name: impl for impl in union_membership[ReplImplementation]
    }
    repl_implementation_cls = implementations.get(repl_shell_name)
    if repl_implementation_cls is None:
        available = sorted(implementations.keys())
        console.print_stderr(
            f"{repr(repl_shell_name)} is not a registered REPL. Available REPLs (which may "
            f"be specified through the option `--repl-shell`): {available}")
        return Repl(-1)
    # Only hand the implementation the closure targets it declares it can handle.
    repl_impl = repl_implementation_cls(targets=Targets(
        tgt for tgt in transitive_targets.closure
        if repl_implementation_cls.is_valid(tgt)))
    repl_binary = await Get[ReplBinary](ReplImplementation, repl_impl)
    # cleanup=False: the interactive REPL process runs from this directory.
    with temporary_dir(root_dir=global_options.options.pants_workdir, cleanup=False) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(
            build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(repl_binary.digest,
                                   path_prefix=path_relative_to_build_root))
        full_path = PurePath(tmpdir, repl_binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path, ),
            run_in_workspace=True,
        )
        result = runner.run_local_interactive_process(run_request)
    return Repl(result.process_exit_code)
def single_target_test(self, result, expected_console_output, success=True, debug=False):
    """Drive the `run_tests` rule for one mocked target; check console output and exit code.

    `result` is returned from the mocked AddressAndTestResult Get; `success` chooses
    whether the mocked debug request's interactive process succeeds or fails.
    """
    console = MockConsole(use_colors=False)
    options = MockOptions(debug=debug, run_coverage=False)
    runner = InteractiveRunner(self.scheduler)
    workspace = Workspace(self.scheduler)
    addr = Address.parse("some/target")
    res = run_rule(
        run_tests,
        rule_args=[
            console, options, runner,
            self.make_addresses_with_origins(addr), workspace
        ],
        mock_gets=[
            MockGet(
                product_type=AddressAndTestResult,
                subject_type=AddressWithOrigin,
                mock=lambda _: AddressAndTestResult(addr, result),
            ),
            MockGet(
                product_type=AddressAndDebugRequest,
                subject_type=AddressWithOrigin,
                mock=lambda _: AddressAndDebugRequest(
                    addr,
                    TestDebugRequest(ipr=self.make_successful_ipr() if success
                                     else self.make_failure_ipr()),
                ),
            ),
            MockGet(
                product_type=CoverageReport,
                subject_type=CoverageDataBatch,
                mock=lambda _: CoverageReport(
                    result_digest=EMPTY_DIRECTORY_DIGEST,
                    directory_to_materialize_to=PurePath("mockety/mock"),
                ),
            ),
        ],
    )
    assert console.stdout.getvalue() == expected_console_output
    # Exit code is 0 on success and 1 on failure.
    assert (0 if success else 1) == res.exit_code
async def run(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    build_root: BuildRoot,
    addresses: Addresses,
    options: RunOptions,
) -> Run:
    """Goal rule: build the binary for the single requested address and run it interactively."""
    address = addresses.expect_single()
    binary = await Get[CreatedBinary](Address, address)
    # Temp dir is created under <build_root>/.pants.d and removed after the run.
    with temporary_dir(root_dir=PurePath(build_root.path, ".pants.d").as_posix(),
                       cleanup=True) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(
            build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(binary.digest,
                                   path_prefix=path_relative_to_build_root))
        console.write_stdout(f"Running target: {address}\n")
        full_path = PurePath(tmpdir, binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path, *options.values.args),
            run_in_workspace=True,
        )
        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
            if result.process_exit_code == 0:
                console.write_stdout(f"{address} ran successfully.\n")
            else:
                console.write_stderr(
                    f"{address} failed with code {result.process_exit_code}!\n"
                )
        except Exception as e:
            # Best-effort: report the failure and surface a non-zero exit code.
            console.write_stderr(
                f"Exception when attempting to run {address}: {e!r}\n")
            exit_code = -1
    return Run(exit_code)
async def run(
    options: RunOptions,
    global_options: GlobalOptions,
    console: Console,
    runner: InteractiveRunner,
    workspace: Workspace,
    build_root: BuildRoot,
) -> Run:
    """Goal rule: build the single valid binary field set, materialize it, and run it."""
    targets_to_valid_field_sets = await Get[TargetsToValidFieldSets](
        TargetsToValidFieldSetsRequest(
            BinaryFieldSet,
            goal_description=f"the `{options.name}` goal",
            error_if_no_valid_targets=True,
            expect_single_field_set=True,
        ))
    field_set = targets_to_valid_field_sets.field_sets[0]
    binary = await Get[CreatedBinary](BinaryFieldSet, field_set)
    workdir = global_options.options.pants_workdir
    # Temp dir lives under the pants workdir and is removed after the run.
    with temporary_dir(root_dir=workdir, cleanup=True) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(
            build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(binary.digest,
                                   path_prefix=path_relative_to_build_root))
        full_path = PurePath(tmpdir, binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path, *options.values.args),
            run_in_workspace=True,
        )
        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
        except Exception as e:
            # Best-effort: report the failure and surface a non-zero exit code.
            console.print_stderr(
                f"Exception when attempting to run {field_set.address}: {e!r}")
            exit_code = -1
    return Run(exit_code)
async def run(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    build_root: BuildRoot,
    bfa: BuildFileAddress,
) -> Run:
    """Goal rule: build the binary for `bfa`, materialize it to a temp dir, and run it interactively."""
    target = bfa.to_address()
    binary = await Get[CreatedBinary](Address, target)
    # Temp dir is created under <build_root>/.pants.d and removed after the run.
    with temporary_dir(root_dir=str(Path(build_root.path, ".pants.d")),
                       cleanup=True) as tmpdir:
        path_relative_to_build_root = str(
            Path(tmpdir).relative_to(build_root.path))
        workspace.materialize_directory(
            DirectoryToMaterialize(binary.digest,
                                   path_prefix=path_relative_to_build_root))
        console.write_stdout(f"Running target: {target}\n")
        full_path = str(Path(tmpdir, binary.binary_name))
        run_request = InteractiveProcessRequest(
            argv=(full_path, ),
            run_in_workspace=True,
        )
        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
            if result.process_exit_code == 0:
                console.write_stdout(f"{target} ran successfully.\n")
            else:
                console.write_stderr(
                    f"{target} failed with code {result.process_exit_code}!\n")
        except Exception as e:
            # Best-effort: report the failure and surface a non-zero exit code.
            console.write_stderr(
                f"Exception when attempting to run {target} : {e}\n")
            exit_code = -1
    return Run(exit_code)
def run_test_rule(
    self,
    *,
    field_set: Type[TestFieldSet],
    targets: List[TargetWithOrigin],
    debug: bool = False,
    use_coverage: bool = False,
    include_sources: bool = True,
    valid_targets: bool = True,
) -> Tuple[int, str]:
    """Drive the `run_tests` rule with mocked Gets; return (exit_code, captured stderr)."""
    console = MockConsole(use_colors=False)
    options = create_goal_subsystem(TestOptions,
                                    debug=debug,
                                    use_coverage=use_coverage)
    interactive_runner = InteractiveRunner(self.scheduler)
    workspace = Workspace(self.scheduler)
    union_membership = UnionMembership({
        TestFieldSet: [field_set],
        CoverageDataCollection: [MockCoverageDataCollection]
    })

    def mock_find_valid_field_sets(
        _: TargetsToValidFieldSetsRequest,
    ) -> TargetsToValidFieldSets:
        # Simulate the "no valid targets" case when requested.
        if not valid_targets:
            return TargetsToValidFieldSets({})
        return TargetsToValidFieldSets({
            tgt_with_origin: [field_set.create(tgt_with_origin)]
            for tgt_with_origin in targets
        })

    def mock_coordinator_of_tests(
        wrapped_field_set: WrappedTestFieldSet,
    ) -> AddressAndTestResult:
        # Echo back the canned result carried on the mock field set.
        field_set = cast(MockTestFieldSet, wrapped_field_set.field_set)
        return AddressAndTestResult(address=field_set.address,
                                    test_result=field_set.test_result)

    def mock_coverage_report_generation(
        coverage_data_collection: MockCoverageDataCollection,
    ) -> CoverageReport:
        addresses = ", ".join(
            coverage_data.address.spec
            for coverage_data in coverage_data_collection)
        return ConsoleCoverageReport(f"Ran coverage on {addresses}")

    result: Test = run_rule(
        run_tests,
        rule_args=[
            console, options, interactive_runner, workspace, union_membership
        ],
        mock_gets=[
            MockGet(
                product_type=TargetsToValidFieldSets,
                subject_type=TargetsToValidFieldSetsRequest,
                mock=mock_find_valid_field_sets,
            ),
            MockGet(
                product_type=AddressAndTestResult,
                subject_type=WrappedTestFieldSet,
                mock=lambda wrapped_config: mock_coordinator_of_tests(
                    wrapped_config),
            ),
            MockGet(
                product_type=TestDebugRequest,
                subject_type=TestFieldSet,
                mock=lambda _: TestDebugRequest(self.make_ipr()),
            ),
            MockGet(
                product_type=FieldSetsWithSources,
                subject_type=FieldSetsWithSourcesRequest,
                mock=lambda field_sets: FieldSetsWithSources(
                    field_sets if include_sources else ()),
            ),
            MockGet(
                product_type=CoverageReport,
                subject_type=CoverageDataCollection,
                mock=mock_coverage_report_generation,
            ),
        ],
        union_membership=union_membership,
    )
    # This harness expects the rule to write nothing to stdout; output goes to stderr.
    assert not console.stdout.getvalue()
    return result.exit_code, console.stderr.getvalue()