def test_repl_ipython(self) -> None:
    """Smoke-test the repl goal with the IPython shell selected."""
    self.setup_python_library()
    extra_params = [InteractiveRunner(self.scheduler)]
    self.execute_rule(
        global_args=["--backend-packages2=pants.backend.python"],
        args=["--shell=ipython", "src/python/lib.py"],
        additional_params=extra_params,
    )
def single_target_test(self, result, expected_console_output, success=True, debug=False):
    """Run `run_tests` against one mocked target and check console output and exit code.

    `result` is the canned test result to return; `success` controls both the expected
    exit code and whether the mocked debug request uses a passing or failing process.
    """
    console = MockConsole(use_colors=False)
    options = MockOptions(debug=debug)
    runner = InteractiveRunner(self.scheduler)
    addr = self.make_build_target_address("some/target")

    def mock_debug_request(_):
        # The debug request mirrors `success`: a canned passing or failing process.
        ipr = self.make_successful_ipr() if success else self.make_failure_ipr()
        return AddressAndDebugRequest(addr, TestDebugRequest(ipr=ipr))

    res = run_rule(
        run_tests,
        rule_args=[console, options, runner, BuildFileAddresses([addr])],
        mock_gets=[
            MockGet(
                product_type=AddressAndTestResult,
                subject_type=Address,
                mock=lambda _: AddressAndTestResult(addr, result),
            ),
            MockGet(
                product_type=AddressAndDebugRequest,
                subject_type=Address,
                mock=mock_debug_request,
            ),
            MockGet(
                product_type=BuildFileAddress,
                subject_type=BuildFileAddresses,
                mock=lambda bfas: bfas.dependencies[0],
            ),
        ],
    )
    assert console.stdout.getvalue() == expected_console_output
    assert (0 if success else 1) == res.exit_code
def run_pytest(
    self, *, passthrough_args: Optional[str] = None, origin: Optional[OriginSpec] = None,
) -> TestResult:
    """Execute the pytest rules against the fixture target, also exercising `test --debug`.

    Asserts that the interactive debug run's exit code agrees with the batch result
    before returning the batch `TestResult`.
    """
    bootstrap_args = [
        "--backend-packages2=pants.backend.python",
        # pin to lower versions so that we can run Python 2 tests
        "--pytest-version=pytest>=4.6.6,<4.7",
        "--pytest-pytest-plugins=['zipp==1.0.0']",
    ]
    if passthrough_args:
        bootstrap_args.append(f"--pytest-args='{passthrough_args}'")
    options_bootstrapper = create_options_bootstrapper(args=bootstrap_args)
    if origin is None:
        origin = SingleAddress(directory=self.source_root, name="target")
    # TODO: We must use the V1 target's `_sources_field.sources` field to set the
    # TargetAdaptor's sources attribute. The adaptor will not auto-populate this field,
    # although it auto-populates things like `dependencies`. Why is this necessary in
    # test code?
    v1_target = self.target(f"{self.source_root}:target")
    adaptor = PythonTestsAdaptor(
        address=v1_target.address.to_address(),
        sources=v1_target._sources_field.sources,
    )
    params = Params(
        PytestRunner(PythonTestsAdaptorWithOrigin(adaptor, origin)), options_bootstrapper
    )
    test_result = self.request_single_product(TestResult, params)
    debug_request = self.request_single_product(TestDebugRequest, params)
    debug_result = InteractiveRunner(self.scheduler).run_local_interactive_process(
        debug_request.ipr
    )
    # A passing test must exit 0 under --debug; a failing one must not.
    if test_result.status == Status.SUCCESS:
        assert debug_result.process_exit_code == 0
    else:
        assert debug_result.process_exit_code != 0
    return test_result
def run_console_rules(self, options_bootstrapper, goals, target_roots):
    """Run @console_rules sequentially and interactively by requesting their implicit Goal
    products.

    For retryable failures, raises scheduler.ExecutionError.

    :param list goals: The list of requested goal names as passed on the commandline.
    :param TargetRoots target_roots: The targets root of the request.
    :returns: An exit code.
    """
    subject = target_roots.specs
    use_colors = options_bootstrapper.bootstrap_options.for_global_scope().colors
    console = Console(use_colors=use_colors)
    workspace = Workspace(self.scheduler_session)
    interactive_runner = InteractiveRunner(self.scheduler_session)
    for goal in goals:
        goal_product = self.goal_map[goal]
        params = Params(subject, options_bootstrapper, console, workspace, interactive_runner)
        logger.debug(f'requesting {goal_product} to satisfy execution of `{goal}` goal')
        try:
            exit_code = self.scheduler_session.run_console_rule(goal_product, params)
        finally:
            # Always flush buffered console output, even if the rule raised.
            console.flush()
        # Stop at the first failing goal and propagate its exit code.
        if exit_code != PANTS_SUCCEEDED_EXIT_CODE:
            return exit_code
    return PANTS_SUCCEEDED_EXIT_CODE
def run_pytest(self, *, passthrough_args: Optional[str] = None) -> TestResult:
    """Execute the pytest rules for the fixture target and sanity-check `test --debug`."""
    bootstrap_args = [
        "--backend-packages2=pants.backend.python",
        "--pytest-version=pytest>=4.6.6,<4.7",  # so that we can run Python 2 tests
    ]
    if passthrough_args:
        bootstrap_args.append(f"--pytest-args='{passthrough_args}'")
    options_bootstrapper = create_options_bootstrapper(args=bootstrap_args)
    target = PythonTestsAdaptor(
        address=BuildFileAddress(
            rel_path=f"{self.source_root}/BUILD", target_name="target"
        ),
    )
    params = Params(target, options_bootstrapper)
    test_result = self.request_single_product(TestResult, params)
    debug_request = self.request_single_product(TestDebugRequest, params)
    debug_result = InteractiveRunner(self.scheduler).run_local_interactive_process(
        debug_request.ipr
    )
    # The interactive debug run's exit code must match the batch outcome.
    if test_result.status == Status.SUCCESS:
        assert debug_result.process_exit_code == 0
    else:
        assert debug_result.process_exit_code != 0
    return test_result
def run_pytest(
    self, *, passthrough_args: Optional[str] = None, origin: Optional[OriginSpec] = None,
) -> TestResult:
    """Execute the pytest rules via PythonTestConfiguration, also exercising `test --debug`."""
    bootstrap_args = [
        "--backend-packages2=pants.backend.python",
        # pin to lower versions so that we can run Python 2 tests
        "--pytest-version=pytest>=4.6.6,<4.7",
        "--pytest-pytest-plugins=['zipp==1.0.0']",
    ]
    if passthrough_args:
        bootstrap_args.append(f"--pytest-args='{passthrough_args}'")
    options_bootstrapper = create_options_bootstrapper(args=bootstrap_args)
    address = Address(self.source_root, "target")
    if origin is None:
        # Default to a single-address origin pointing at the fixture target itself.
        origin = SingleAddress(directory=address.spec_path, name=address.target_name)
    tgt = PythonTests({}, address=address)
    params = Params(
        PythonTestConfiguration.create(TargetWithOrigin(tgt, origin)),
        options_bootstrapper,
    )
    test_result = self.request_single_product(TestResult, params)
    debug_request = self.request_single_product(TestDebugRequest, params)
    debug_result = InteractiveRunner(self.scheduler).run_local_interactive_process(
        debug_request.ipr
    )
    # The interactive debug run's exit code must match the batch outcome.
    if test_result.status == Status.SUCCESS:
        assert debug_result.process_exit_code == 0
    else:
        assert debug_result.process_exit_code != 0
    return test_result
def test_output_mixed(self):
    """One passing and one failing target: exit code is 1 and both outputs are printed."""
    console = MockConsole(use_colors=False)
    options = MockOptions(debug=False)
    runner = InteractiveRunner(self.scheduler)
    target1 = self.make_build_target_address("testprojects/tests/python/pants/passes")
    target2 = self.make_build_target_address("testprojects/tests/python/pants/fails")

    def make_result(target):
        # Canned results: target1 passes, target2 fails; anything else is a test bug.
        if target == target1:
            tr = TestResult(status=Status.SUCCESS, stdout='I passed\n', stderr='')
        elif target == target2:
            tr = TestResult(status=Status.FAILURE, stdout='I failed\n', stderr='')
        else:
            raise Exception("Unrecognised target")
        return AddressAndTestResult(target, tr)

    def make_debug_request(target):
        request = TestDebugRequest(
            ipr=self.make_successful_ipr() if target == target1 else self.make_failure_ipr()
        )
        return AddressAndDebugRequest(target, request)

    res = run_rule(
        run_tests,
        rule_args=[console, options, runner, (target1, target2)],
        mock_gets=[
            MockGet(
                product_type=AddressAndTestResult,
                subject_type=Address,
                mock=make_result,
            ),
            MockGet(
                product_type=AddressAndDebugRequest,
                subject_type=Address,
                mock=make_debug_request,
            ),
            MockGet(
                product_type=BuildFileAddress,
                subject_type=BuildFileAddresses,
                mock=lambda tgt: BuildFileAddress(
                    rel_path=f'{tgt.spec_path}/BUILD',
                    target_name=tgt.target_name,
                ),
            ),
        ],
    )
    self.assertEqual(1, res.exit_code)
    # NB: `assertEqual`, not the deprecated `assertEquals` alias (removed in Python 3.12).
    self.assertEqual(
        console.stdout.getvalue(),
        dedent("""\
            testprojects/tests/python/pants/passes stdout:
            I passed

            testprojects/tests/python/pants/fails stdout:
            I failed


            testprojects/tests/python/pants/passes                                          .....   SUCCESS
            testprojects/tests/python/pants/fails                                           .....   FAILURE
            """))
def single_target_run(
    self, *, console: MockConsole, program_text: bytes, address_spec: str,
) -> Run:
    """Invoke the `run` rule for a single address, mocking binary creation."""
    workspace = Workspace(self.scheduler)
    interactive_runner = InteractiveRunner(self.scheduler)
    # The rule resolves paths relative to the build root, so pin it to the test's root.
    BuildRoot().path = self.build_root
    rule_result = run_rule(
        run,
        rule_args=[
            console,
            workspace,
            interactive_runner,
            BuildRoot(),
            Addresses([Address.parse(address_spec)]),
            MockOptions(args=[]),
        ],
        mock_gets=[
            MockGet(
                product_type=CreatedBinary,
                subject_type=Address,
                mock=lambda _: self.create_mock_binary(program_text),
            ),
        ],
    )
    return cast(Run, rule_result)
def single_target_run(
    self, *, console: MockConsole, program_text: bytes, address_spec: str,
) -> Run:
    """Invoke the `run` rule against a BuildFileAddress, mocking binary creation."""
    workspace = Workspace(self.scheduler)
    interactive_runner = InteractiveRunner(self.scheduler)
    address = Address.parse(address_spec)
    bfa = BuildFileAddress(
        build_file=None,
        target_name=address.target_name,
        rel_path=f'{address.spec_path}/BUILD',
    )
    # The rule resolves paths relative to the build root, so pin it to the test's root.
    BuildRoot().path = self.build_root
    rule_result = run_rule(
        run,
        rule_args=[console, workspace, interactive_runner, BuildRoot(), bfa, MockOptions(args=[])],
        mock_gets=[
            MockGet(
                product_type=CreatedBinary,
                subject_type=Address,
                mock=lambda _: self.create_mock_binary(program_text),
            ),
        ],
    )
    return cast(Run, rule_result)
def test_repl_ipython(self) -> None:
    """The repl goal with --shell=ipython reports a successful exit."""
    self.setup_python_library()
    output = self.execute_rule(
        global_args=["--backend-packages2=pants.backend.python"],
        args=["--shell=ipython", "src/python:some_lib"],
        additional_params=[InteractiveRunner(self.scheduler)],
    )
    assert output == "REPL exited successfully."
def run_test_rule(
    self,
    *,
    test_runner: Type[TestRunner],
    targets: List[HydratedTargetWithOrigin],
    debug: bool = False,
) -> Tuple[int, str]:
    """Drive `run_tests` with a mocked test-runner union member.

    Returns the rule's exit code and everything printed to stdout.
    """
    console = MockConsole(use_colors=False)
    options = MockOptions(debug=debug, run_coverage=False)
    interactive_runner = InteractiveRunner(self.scheduler)
    workspace = Workspace(self.scheduler)
    union_membership = UnionMembership({TestRunner: OrderedSet([test_runner])})

    def mock_coordinator_of_tests(
        wrapped_test_runner: WrappedTestRunner,
    ) -> AddressAndTestResult:
        # Echo back the runner's own canned result rather than running anything.
        inner = wrapped_test_runner.runner
        return AddressAndTestResult(
            address=inner.adaptor_with_origin.adaptor.address,
            test_result=inner.test_result,  # type: ignore[attr-defined]
        )

    result: Test = run_rule(
        run_tests,
        rule_args=[
            console,
            options,
            interactive_runner,
            HydratedTargetsWithOrigins(targets),
            workspace,
            union_membership,
        ],
        mock_gets=[
            MockGet(
                product_type=AddressAndTestResult,
                subject_type=WrappedTestRunner,
                mock=mock_coordinator_of_tests,
            ),
            MockGet(
                product_type=TestDebugRequest,
                subject_type=TestRunner,
                mock=lambda _: TestDebugRequest(self.make_ipr()),
            ),
            MockGet(
                product_type=CoverageReport,
                subject_type=CoverageDataBatch,
                mock=lambda _: FilesystemCoverageReport(
                    result_digest=EMPTY_DIRECTORY_DIGEST,
                    directory_to_materialize_to=PurePath("mockety/mock"),
                    report_file=None,
                ),
            ),
        ],
        union_membership=union_membership,
    )
    return result.exit_code, console.stdout.getvalue()
def test_materialize_input_files(self) -> None:
    """An interactive process run outside the workspace materializes its input digest."""
    program_text = b'#!/usr/bin/python\nprint("hello")'
    binary = self.create_mock_binary(program_text)
    request = InteractiveProcessRequest(
        argv=("./program.py",),
        run_in_workspace=False,
        input_files=binary.digest,
    )
    result = InteractiveRunner(self.scheduler).run_local_interactive_process(request)
    self.assertEqual(result.process_exit_code, 0)
def test_repl_bogus_repl_name(self) -> None:
    """Requesting an unregistered shell fails with a helpful stderr message."""
    self.setup_python_library()
    result = self.execute_rule(
        global_args=["--backend-packages2=pants.backend.python"],
        args=["--shell=bogus-repl", "src/python/lib.py"],
        additional_params=[InteractiveRunner(self.scheduler)],
        exit_code=-1,
    )
    assert "'bogus-repl' is not a registered REPL. Available REPLs" in result.stderr
def run_goal_rules(
    self,
    *,
    options_bootstrapper: OptionsBootstrapper,
    union_membership: UnionMembership,
    options: Options,
    goals: Iterable[str],
    specs: Specs,
):
    """Run @goal_rules sequentially and interactively by requesting their implicit Goal
    products.

    For retryable failures, raises scheduler.ExecutionError.

    :returns: An exit code.
    """
    global_options = options.for_global_scope()
    console = Console(
        use_colors=global_options.colors,
        session=self.scheduler_session if global_options.get("v2_ui") else None,
    )
    workspace = Workspace(self.scheduler_session)
    interactive_runner = InteractiveRunner(self.scheduler_session)
    for goal in goals:
        goal_product = self.goal_map[goal]
        # NB: We no-op for goals that have no V2 implementation because no relevant backends are
        # registered. This allows us to safely set `--v1 --v2`, even if no V2 backends are
        # registered. Once V1 is removed, we might want to reconsider the behavior to instead
        # warn or error when trying to run something like `./pants run` without any backends
        # registered.
        is_implemented = union_membership.has_members_for_all(
            goal_product.subsystem_cls.required_union_implementations
        )
        if not is_implemented:
            continue
        params = Params(
            specs.provided_specs,
            options_bootstrapper,
            console,
            workspace,
            interactive_runner,
        )
        logger.debug(f"requesting {goal_product} to satisfy execution of `{goal}` goal")
        try:
            exit_code = self.scheduler_session.run_goal_rule(goal_product, params)
        finally:
            # Always flush buffered console output, even if the rule raised.
            console.flush()
        # Stop at the first failing goal and propagate its exit code.
        if exit_code != PANTS_SUCCEEDED_EXIT_CODE:
            return exit_code
    return PANTS_SUCCEEDED_EXIT_CODE
def test_repl_bogus_repl_name(self) -> None:
    """Requesting an uninstalled shell fails with a helpful error message."""
    self.setup_python_library()
    output = self.execute_rule(
        global_args=["--backend-packages2=pants.backend.python"],
        args=["--shell=bogus-repl", "src/python:some_lib"],
        additional_params=[InteractiveRunner(self.scheduler)],
        exit_code=-1,
    )
    assert "bogus-repl is not an installed REPL program. Available REPLs:" in output
def test_repl_with_targets(self) -> None:
    """Smoke-test the repl goal with the default shell against a concrete source file."""
    self.setup_python_library()
    self.execute_rule(
        global_args=[
            "--backend-packages2=pants.backend.python",
            "--source-root-patterns=src/python",
        ],
        args=["src/python/lib.py"],
        additional_params=[InteractiveRunner(self.scheduler)],
    )
def single_target_run(
    self, *, console: MockConsole, program_text: bytes, address_spec: str,
) -> Run:
    """Invoke the `run` rule against a minimal in-memory binary target."""
    workspace = Workspace(self.scheduler)
    interactive_runner = InteractiveRunner(self.scheduler)

    # Minimal stand-ins: a configuration with no required fields and a bare target type.
    class TestBinaryConfiguration(BinaryConfiguration):
        required_fields = ()

    class TestBinaryTarget(Target):
        alias = "binary"
        core_fields = ()

    address = Address.parse(address_spec)
    origin = SingleAddress(address.spec_path, address.target_name)
    targets_with_origins = TargetsWithOrigins([
        TargetWithOrigin(
            target=TestBinaryTarget(unhydrated_values={}, address=address),
            origin=origin,
        )
    ])
    rule_result = run_rule(
        run,
        rule_args=[
            console,
            workspace,
            interactive_runner,
            BuildRoot(),
            targets_with_origins,
            create_goal_subsystem(RunOptions, args=[]),
            create_subsystem(GlobalOptions, pants_workdir=self.pants_workdir),
            UnionMembership(
                union_rules={BinaryConfiguration: OrderedSet([TestBinaryConfiguration])}
            ),
            RegisteredTargetTypes.create([TestBinaryTarget]),
        ],
        mock_gets=[
            MockGet(
                product_type=CreatedBinary,
                subject_type=TestBinaryConfiguration,
                mock=lambda _: self.create_mock_binary(program_text),
            ),
        ],
    )
    return cast(Run, rule_result)
def single_target_run(
    self, *, console: MockConsole, program_text: bytes, address_spec: str,
) -> Run:
    """Invoke the `run` rule against a minimal in-memory binary field set."""
    workspace = Workspace(self.scheduler)
    interactive_runner = InteractiveRunner(self.scheduler)

    # Minimal stand-ins: a field set with no required fields and a bare target type.
    class TestBinaryFieldSet(BinaryFieldSet):
        required_fields = ()

    class TestBinaryTarget(Target):
        alias = "binary"
        core_fields = ()

    address = Address.parse(address_spec)
    target = TestBinaryTarget({}, address=address)
    target_with_origin = TargetWithOrigin(
        target, SingleAddress(address.spec_path, address.target_name)
    )
    field_set = TestBinaryFieldSet.create(target)
    rule_result = run_rule(
        run,
        rule_args=[
            create_goal_subsystem(RunOptions, args=[]),
            create_subsystem(GlobalOptions, pants_workdir=self.pants_workdir),
            console,
            interactive_runner,
            workspace,
            BuildRoot(),
        ],
        mock_gets=[
            MockGet(
                product_type=TargetsToValidFieldSets,
                subject_type=TargetsToValidFieldSetsRequest,
                mock=lambda _: TargetsToValidFieldSets({target_with_origin: [field_set]}),
            ),
            MockGet(
                product_type=CreatedBinary,
                subject_type=TestBinaryFieldSet,
                mock=lambda _: self.create_mock_binary(program_text),
            ),
        ],
    )
    return cast(Run, rule_result)
def run_goal_rules(
    self,
    options_bootstrapper: OptionsBootstrapper,
    options: Options,
    goals: Iterable[str],
    specs: Specs,
):
    """Run @goal_rules sequentially and interactively by requesting their implicit Goal
    products.

    For retryable failures, raises scheduler.ExecutionError.

    :returns: An exit code.
    """
    global_options = options.for_global_scope()
    console = Console(
        use_colors=global_options.colors,
        session=self.scheduler_session if global_options.get('v2_ui') else None,
    )
    workspace = Workspace(self.scheduler_session)
    interactive_runner = InteractiveRunner(self.scheduler_session)
    for goal in goals:
        goal_product = self.goal_map[goal]
        params = Params(
            specs.provided_specs,
            options_bootstrapper,
            console,
            workspace,
            interactive_runner,
        )
        logger.debug(f'requesting {goal_product} to satisfy execution of `{goal}` goal')
        try:
            exit_code = self.scheduler_session.run_goal_rule(goal_product, params)
        finally:
            # Always flush buffered console output, even if the rule raised.
            console.flush()
        # Stop at the first failing goal and propagate its exit code.
        if exit_code != PANTS_SUCCEEDED_EXIT_CODE:
            return exit_code
    return PANTS_SUCCEEDED_EXIT_CODE
def single_target_test(self, result, expected_console_output, success=True, debug=False):
    """Run `run_tests` against one mocked address and check output and exit code."""
    console = MockConsole(use_colors=False)
    options = MockOptions(debug=debug, run_coverage=False)
    runner = InteractiveRunner(self.scheduler)
    workspace = Workspace(self.scheduler)
    addr = Address.parse("some/target")

    def mock_debug_request(_):
        # The debug request mirrors `success`: a canned passing or failing process.
        ipr = self.make_successful_ipr() if success else self.make_failure_ipr()
        return AddressAndDebugRequest(addr, TestDebugRequest(ipr=ipr))

    res = run_rule(
        run_tests,
        rule_args=[
            console,
            options,
            runner,
            self.make_addresses_with_origins(addr),
            workspace,
        ],
        mock_gets=[
            MockGet(
                product_type=AddressAndTestResult,
                subject_type=AddressWithOrigin,
                mock=lambda _: AddressAndTestResult(addr, result),
            ),
            MockGet(
                product_type=AddressAndDebugRequest,
                subject_type=AddressWithOrigin,
                mock=mock_debug_request,
            ),
            MockGet(
                product_type=CoverageReport,
                subject_type=CoverageDataBatch,
                mock=lambda _: CoverageReport(
                    result_digest=EMPTY_DIRECTORY_DIGEST,
                    directory_to_materialize_to=PurePath("mockety/mock"),
                ),
            ),
        ],
    )
    assert console.stdout.getvalue() == expected_console_output
    assert (0 if success else 1) == res.exit_code
def run_test_rule(
    self,
    *,
    config: Type[TestConfiguration],
    targets: List[TargetWithOrigin],
    debug: bool = False,
    include_sources: bool = True,
) -> Tuple[int, str]:
    """Drive `run_tests` with a mocked test-configuration union member.

    Returns the rule's exit code and everything printed to stdout.
    """
    console = MockConsole(use_colors=False)
    options = MockOptions(debug=debug, run_coverage=False)
    interactive_runner = InteractiveRunner(self.scheduler)
    workspace = Workspace(self.scheduler)
    union_membership = UnionMembership({TestConfiguration: OrderedSet([config])})

    def mock_coordinator_of_tests(
        wrapped_config: WrappedTestConfiguration,
    ) -> AddressAndTestResult:
        # Echo back the configuration's own canned result rather than running anything.
        inner = wrapped_config.config
        return AddressAndTestResult(
            address=inner.address,
            test_result=inner.test_result,  # type: ignore[attr-defined]
        )

    def mock_hydrated_sources(_: HydrateSourcesRequest) -> HydratedSources:
        # `include_sources` toggles whether the snapshot contains any files at all.
        file_names = cast(Tuple[str, ...], ("test.hs",) if include_sources else ())
        return HydratedSources(
            Snapshot(
                directory_digest=EMPTY_DIRECTORY_DIGEST,
                files=file_names,
                dirs=(),
            ),
            filespec={"globs": []},
        )

    result: Test = run_rule(
        run_tests,
        rule_args=[
            console,
            options,
            interactive_runner,
            TargetsWithOrigins(targets),
            workspace,
            union_membership,
            RegisteredTargetTypes.create([MockTarget]),
        ],
        mock_gets=[
            MockGet(
                product_type=AddressAndTestResult,
                subject_type=WrappedTestConfiguration,
                mock=mock_coordinator_of_tests,
            ),
            MockGet(
                product_type=TestDebugRequest,
                subject_type=TestConfiguration,
                mock=lambda _: TestDebugRequest(self.make_ipr()),
            ),
            MockGet(
                product_type=HydratedSources,
                subject_type=HydrateSourcesRequest,
                mock=mock_hydrated_sources,
            ),
            MockGet(
                product_type=CoverageReport,
                subject_type=CoverageDataBatch,
                mock=lambda _: FilesystemCoverageReport(
                    result_digest=EMPTY_DIRECTORY_DIGEST,
                    directory_to_materialize_to=PurePath("mockety/mock"),
                    report_file=None,
                ),
            ),
        ],
        union_membership=union_membership,
    )
    return result.exit_code, console.stdout.getvalue()
def test_output_mixed(self) -> None:
    """One passing and one failing address: exit code is 1 and both outputs are printed."""
    console = MockConsole(use_colors=False)
    options = MockOptions(debug=False, run_coverage=False)
    runner = InteractiveRunner(self.scheduler)
    workspace = Workspace(self.scheduler)
    address1 = Address.parse("testprojects/tests/python/pants/passes")
    address2 = Address.parse("testprojects/tests/python/pants/fails")

    def make_result(address_with_origin: AddressWithOrigin) -> AddressAndTestResult:
        # Canned results: address1 passes, address2 fails; anything else is a test bug.
        address = address_with_origin.address
        if address == address1:
            tr = TestResult(status=Status.SUCCESS, stdout="I passed\n", stderr="")
        elif address == address2:
            tr = TestResult(status=Status.FAILURE, stdout="I failed\n", stderr="")
        else:
            raise Exception("Unrecognised target")
        return AddressAndTestResult(address, tr)

    def make_debug_request(address_with_origin: AddressWithOrigin) -> AddressAndDebugRequest:
        address = address_with_origin.address
        ipr = self.make_successful_ipr() if address == address1 else self.make_failure_ipr()
        return AddressAndDebugRequest(address, TestDebugRequest(ipr=ipr))

    res = run_rule(
        run_tests,
        rule_args=[
            console,
            options,
            runner,
            self.make_addresses_with_origins(address1, address2),
            workspace,
        ],
        mock_gets=[
            MockGet(
                product_type=AddressAndTestResult,
                subject_type=AddressWithOrigin,
                mock=make_result,
            ),
            MockGet(
                product_type=AddressAndDebugRequest,
                subject_type=AddressWithOrigin,
                mock=make_debug_request,
            ),
            MockGet(
                product_type=CoverageReport,
                subject_type=CoverageDataBatch,
                mock=lambda _: CoverageReport(
                    result_digest=EMPTY_DIRECTORY_DIGEST,
                    directory_to_materialize_to=PurePath("mockety/mock"),
                ),
            ),
        ],
    )
    self.assertEqual(1, res.exit_code)
    self.assertEqual(
        console.stdout.getvalue(),
        dedent("""\
            testprojects/tests/python/pants/passes stdout:
            I passed

            testprojects/tests/python/pants/fails stdout:
            I failed


            testprojects/tests/python/pants/passes                                          .....   SUCCESS
            testprojects/tests/python/pants/fails                                           .....   FAILURE
            """),
    )
def run_test_rule(
    self,
    *,
    field_set: Type[TestFieldSet],
    targets: List[TargetWithOrigin],
    debug: bool = False,
    use_coverage: bool = False,
    include_sources: bool = True,
    valid_targets: bool = True,
) -> Tuple[int, str]:
    """Drive `run_tests` with a mocked test field set and coverage collection.

    Returns the rule's exit code and everything printed to stderr (the rule is expected
    to write nothing to stdout).
    """
    console = MockConsole(use_colors=False)
    options = create_goal_subsystem(TestOptions, debug=debug, use_coverage=use_coverage)
    interactive_runner = InteractiveRunner(self.scheduler)
    workspace = Workspace(self.scheduler)
    union_membership = UnionMembership({
        TestFieldSet: [field_set],
        CoverageDataCollection: [MockCoverageDataCollection],
    })

    def mock_find_valid_field_sets(
        _: TargetsToValidFieldSetsRequest,
    ) -> TargetsToValidFieldSets:
        # `valid_targets=False` simulates no target matching the field set.
        if not valid_targets:
            return TargetsToValidFieldSets({})
        return TargetsToValidFieldSets({
            tgt_with_origin: [field_set.create(tgt_with_origin)]
            for tgt_with_origin in targets
        })

    def mock_coordinator_of_tests(
        wrapped_field_set: WrappedTestFieldSet,
    ) -> AddressAndTestResult:
        # Echo back the field set's own canned result rather than running anything.
        inner = cast(MockTestFieldSet, wrapped_field_set.field_set)
        return AddressAndTestResult(address=inner.address, test_result=inner.test_result)

    def mock_coverage_report_generation(
        coverage_data_collection: MockCoverageDataCollection,
    ) -> CoverageReport:
        addresses = ", ".join(
            coverage_data.address.spec for coverage_data in coverage_data_collection
        )
        return ConsoleCoverageReport(f"Ran coverage on {addresses}")

    result: Test = run_rule(
        run_tests,
        rule_args=[console, options, interactive_runner, workspace, union_membership],
        mock_gets=[
            MockGet(
                product_type=TargetsToValidFieldSets,
                subject_type=TargetsToValidFieldSetsRequest,
                mock=mock_find_valid_field_sets,
            ),
            MockGet(
                product_type=AddressAndTestResult,
                subject_type=WrappedTestFieldSet,
                mock=mock_coordinator_of_tests,
            ),
            MockGet(
                product_type=TestDebugRequest,
                subject_type=TestFieldSet,
                mock=lambda _: TestDebugRequest(self.make_ipr()),
            ),
            MockGet(
                product_type=FieldSetsWithSources,
                subject_type=FieldSetsWithSourcesRequest,
                mock=lambda field_sets: FieldSetsWithSources(
                    field_sets if include_sources else ()
                ),
            ),
            MockGet(
                product_type=CoverageReport,
                subject_type=CoverageDataCollection,
                mock=mock_coverage_report_generation,
            ),
        ],
        union_membership=union_membership,
    )
    # The rule writes its report to stderr only.
    assert not console.stdout.getvalue()
    return result.exit_code, console.stderr.getvalue()