def test_streaming_workunit_log_levels(self) -> None:
    """At TRACE verbosity the handler reports engine-internal workunits too.

    The Select node (an engine-internal workunit, normally hidden at lower
    verbosities) should appear in the finished workunits, and the user-facing
    rule workunit should be parented to it.
    """
    scheduler, tracker, handler = self._fixture_for_rules(
        [rule_one_function, rule_two, rule_three, rule_four, QueryRule(Beta, (Input,))],
        max_workunit_verbosity=LogLevel.TRACE,
    )
    with handler.session():
        i = Input()
        scheduler.product_request(Beta, subjects=[i])

    assert tracker.finished
    finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))

    # With the max_workunit_verbosity set to TRACE, we should see the workunit
    # corresponding to the Select node.
    select = next(
        item
        for item in finished
        if item["name"]
        not in {
            "canonical_rule_one",
            "pants.engine.internals.engine_test.rule_two",
            "pants.engine.internals.engine_test.rule_three",
            "pants.engine.internals.engine_test.rule_four",
        }
    )
    assert select["name"] == "select"
    assert select["level"] == "TRACE"

    # The named rule's workunit is a child of the Select node's span.
    r1 = next(item for item in finished if item["name"] == "canonical_rule_one")
    assert r1["parent_id"] == select["span_id"]
def test_engine_aware_none_case(self):
    """A `level()` hook returning None must not override the workunit's level.

    The rule is reported at the handler's TRACE level because the
    EngineAwareReturnType declined to set one.
    """

    # If level() returns None, the engine shouldn't try to set
    # a new workunit level.
    @dataclass(frozen=True)
    class ModifiedOutput(EngineAwareReturnType):
        # Level the engine-aware hook reports; None means "no override".
        _level: Optional[LogLevel]
        val: int

        def level(self):
            return self._level

    @rule(desc="a_rule")
    def a_rule(n: int) -> ModifiedOutput:
        return ModifiedOutput(val=n, _level=None)

    scheduler, tracker, handler = self._fixture_for_rules(
        [a_rule, QueryRule(ModifiedOutput, (int,))], max_workunit_verbosity=LogLevel.TRACE
    )
    with handler.session():
        scheduler.product_request(ModifiedOutput, subjects=[0])

    finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
    workunit = next(
        item
        for item in finished
        if item["name"] == "pants.engine.internals.engine_test.a_rule"
    )
    # No override was applied, so the workunit keeps its default TRACE level.
    assert workunit["level"] == "TRACE"
def rule_runner() -> RuleRunner:
    """RuleRunner fixture for packaging Python AWS Lambdas.

    Wires the awslambda packaging rules plus the target/package rules they
    depend on, and exposes a BuiltPackage query for PythonAwsLambdaFieldSet.

    Fix: `PythonRequirementTarget` was listed twice in `target_types`; the
    duplicate entry is removed.
    """
    rule_runner = RuleRunner(
        rules=[
            *awslambda_python_rules(),
            *awslambda_python_subsystem_rules(),
            *core_target_types_rules(),
            *package_pex_binary.rules(),
            *python_target_types_rules(),
            *target_rules(),
            QueryRule(BuiltPackage, (PythonAwsLambdaFieldSet,)),
        ],
        target_types=[
            FileTarget,
            FilesGeneratorTarget,
            PexBinary,
            PythonAWSLambda,
            PythonRequirementTarget,
            PythonSourcesGeneratorTarget,
            RelocatedFiles,
            ResourcesGeneratorTarget,
        ],
    )
    # Lambdex/pex need a working interpreter discovery environment.
    rule_runner.set_options([], env_inherit={"PATH", "PYENV_ROOT", "HOME"})
    return rule_runner
def test_streaming_workunits_expanded_specs(run_tracker: RunTracker) -> None:
    """The workunit context must expand Specs into their per-file targets.

    A glob spec ("somefiles::") expands to every generated file target under
    it, while a literal file spec ("others/b.py") expands to exactly that file.
    """
    rule_runner = RuleRunner(
        target_types=[PythonSourcesGeneratorTarget],
        rules=[
            QueryRule(ProcessResult, (Process,)),
        ],
    )
    rule_runner.set_options(["--backend-packages=pants.backend.python"])
    rule_runner.write_files(
        {
            "src/python/somefiles/BUILD": "python_sources()",
            "src/python/somefiles/a.py": "print('')",
            "src/python/somefiles/b.py": "print('')",
            "src/python/others/BUILD": "python_sources()",
            "src/python/others/a.py": "print('')",
            "src/python/others/b.py": "print('')",
        }
    )
    specs = SpecsParser().parse_specs(
        ["src/python/somefiles::", "src/python/others/b.py"],
        convert_dir_literal_to_address_literal=False,
        description_of_origin="tests",
    )

    class Callback(WorkunitsCallback):
        @property
        def can_finish_async(self) -> bool:
            # Run synchronously so the assertions below fire within the test.
            return False

        def __call__(self, **kwargs) -> None:
            context = kwargs["context"]
            assert isinstance(context, StreamingWorkunitContext)

            expanded = context.get_expanded_specs()
            targets = expanded.targets

            # One entry per input spec: the glob and the literal file.
            assert len(targets.keys()) == 2
            assert targets["src/python/others/b.py"] == [
                TargetInfo(filename="src/python/others/b.py")
            ]
            assert set(targets["src/python/somefiles"]) == {
                TargetInfo(filename="src/python/somefiles/a.py"),
                TargetInfo(filename="src/python/somefiles/b.py"),
            }

    handler = StreamingWorkunitHandler(
        scheduler=rule_runner.scheduler,
        run_tracker=run_tracker,
        callbacks=[Callback()],
        report_interval_seconds=0.01,
        max_workunit_verbosity=LogLevel.INFO,
        specs=specs,
        options_bootstrapper=create_options_bootstrapper(
            ["--backend-packages=pants.backend.python"]
        ),
        allow_async_completion=False,
    )
    stdout_process = Process(
        argv=("/bin/bash", "-c", "/bin/echo 'stdout output'"), description="Stdout process"
    )
    with handler:
        rule_runner.request(ProcessResult, [stdout_process])
def rule_runner() -> RuleRunner:
    """RuleRunner fixture exposing the pex rules and the query entry points
    used by this module's tests (Pex/VenvPex creation, process conversion,
    resolve-info inspection, and the pex PEX tool itself)."""
    queries = [
        QueryRule(GlobalOptions, []),
        QueryRule(Pex, (PexRequest,)),
        QueryRule(VenvPex, (PexRequest,)),
        QueryRule(Process, (PexProcess,)),
        QueryRule(Process, (VenvPexProcess,)),
        QueryRule(ProcessResult, (Process,)),
        QueryRule(PexResolveInfo, (Pex,)),
        QueryRule(PexResolveInfo, (VenvPex,)),
        QueryRule(PexPEX, ()),
    ]
    return RuleRunner(rules=[*pex_rules(), *queries])
def test_setup_lockfile_interpreter_constraints() -> None:
    """The IPython lockfile request must use the interpreter constraints of the
    repo's Python targets, falling back to the global constraint when there are
    none, and OR-ing together all distinct constraints otherwise."""
    rule_runner = RuleRunner(
        rules=[
            *subsystem_rules(),
            QueryRule(PythonLockfileRequest, [IPythonLockfileSentinel]),
        ],
        target_types=[PythonSourcesGeneratorTarget, GenericTarget],
    )

    global_constraint = "==3.9.*"
    rule_runner.set_options(
        ["--ipython-lockfile=lockfile.txt"],
        env={"PANTS_PYTHON_INTERPRETER_CONSTRAINTS": f"['{global_constraint}']"},
    )

    def assert_ics(build_file: str, expected: list[str]) -> None:
        # Overwrite the BUILD file, then check the computed constraints.
        rule_runner.write_files({"project/BUILD": build_file})
        lockfile_request = rule_runner.request(
            PythonLockfileRequest, [IPythonLockfileSentinel()]
        )
        assert lockfile_request.interpreter_constraints == InterpreterConstraints(expected)

    assert_ics("python_sources()", [global_constraint])
    assert_ics("python_sources(interpreter_constraints=['==2.7.*'])", ["==2.7.*"])
    assert_ics(
        "python_sources(interpreter_constraints=['==2.7.*', '==3.5.*'])",
        ["==2.7.*", "==3.5.*"],
    )

    # If no Python targets in repo, fall back to global [python] constraints.
    assert_ics("target()", [global_constraint])

    # If there are multiple distinct ICs in the repo, we OR them. Even though the user might AND
    # them by running `./pants repl ::`, they could also run on more precise subsets like
    # `./pants repl py2::` and then `./pants repl py3::`
    assert_ics(
        dedent(
            """\
            python_sources(name='a', interpreter_constraints=['==2.7.*'])
            python_sources(name='b', interpreter_constraints=['==3.5.*'])
            """
        ),
        ["==2.7.*", "==3.5.*"],
    )
    assert_ics(
        dedent(
            """\
            python_sources(name='a', interpreter_constraints=['==2.7.*', '==3.5.*'])
            python_sources(name='b', interpreter_constraints=['>=3.5'])
            """
        ),
        ["==2.7.*", "==3.5.*", ">=3.5"],
    )
    assert_ics(
        dedent(
            """\
            python_sources(name='a')
            python_sources(name='b', interpreter_constraints=['==2.7.*'])
            python_sources(name='c', interpreter_constraints=['>=3.6'])
            """
        ),
        ["==2.7.*", global_constraint, ">=3.6"],
    )
def test_streaming_workunit_log_level_parent_rewrite(self) -> None:
    """Workunit parent ids are rewritten to the nearest *visible* ancestor.

    At INFO verbosity the DEBUG-level rule_B is filtered out, so rule_C's
    parent is rewritten to rule_A. At TRACE verbosity all three rules are
    visible and the true chain A -> B -> C is preserved.
    """
    rules = [rule_A, rule_B, rule_C, QueryRule(Alpha, (Input,))]
    scheduler = self.mk_scheduler(
        rules, include_trace_on_error=False, should_report_workunits=True
    )

    tracker = WorkunitTracker()
    info_level_handler = StreamingWorkunitHandler(
        scheduler,
        callbacks=[tracker.add],
        report_interval_seconds=0.01,
        max_workunit_verbosity=LogLevel.INFO,
    )

    with info_level_handler.session():
        i = Input()
        scheduler.product_request(Alpha, subjects=[i])
    assert tracker.finished
    finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))

    # Only rule_A and rule_C survive INFO-level filtering.
    assert len(finished) == 2

    r_A = next(
        item
        for item in finished
        if item["name"] == "pants.engine.internals.engine_test.rule_A"
    )
    r_C = next(
        item
        for item in finished
        if item["name"] == "pants.engine.internals.engine_test.rule_C"
    )
    # rule_B was filtered out, so rule_C is re-parented directly to rule_A.
    assert "parent_id" not in r_A
    assert r_C["parent_id"] == r_A["span_id"]

    scheduler = self.mk_scheduler(
        rules, include_trace_on_error=False, should_report_workunits=True
    )
    tracker = WorkunitTracker()
    debug_level_handler = StreamingWorkunitHandler(
        scheduler,
        callbacks=[tracker.add],
        report_interval_seconds=0.01,
        max_workunit_verbosity=LogLevel.TRACE,
    )

    with debug_level_handler.session():
        i = Input()
        scheduler.product_request(Alpha, subjects=[i])
    assert tracker.finished
    finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))

    r_A = next(
        item
        for item in finished
        if item["name"] == "pants.engine.internals.engine_test.rule_A"
    )
    r_B = next(
        item
        for item in finished
        if item["name"] == "pants.engine.internals.engine_test.rule_B"
    )
    r_C = next(
        item
        for item in finished
        if item["name"] == "pants.engine.internals.engine_test.rule_C"
    )

    # At TRACE verbosity the full parent chain A -> B -> C is intact.
    assert r_B["parent_id"] == r_A["span_id"]
    assert r_C["parent_id"] == r_B["span_id"]
def test_get_requirements_with_exclude() -> None:
    """A `!!`-prefixed dependency on a python_distribution must exclude that
    3rdparty requirement (ext2) from the exported requirements, even though a
    transitive dependency (qux) pulls it in; ext1 is still exported."""
    rule_runner = create_setup_py_rule_runner(
        rules=[
            determine_setup_kwargs,
            get_requirements,
            get_owned_dependencies,
            get_exporting_owner,
            SubsystemRule(SetupPyGeneration),
            QueryRule(ExportedTargetRequirements, (DependencyOwner,)),
        ]
    )
    rule_runner.add_to_build_file(
        "3rdparty",
        textwrap.dedent(
            """
            python_requirement_library(
                name='ext1',
                requirements=['ext1==1.22.333'],
            )
            python_requirement_library(
                name='ext2',
                requirements=['ext2==4.5.6'],
            )
            python_requirement_library(
                name='ext3',
                requirements=['ext3==0.0.1'],
            )
            """
        ),
    )
    rule_runner.add_to_build_file(
        "src/python/foo/bar/baz",
        "python_library(dependencies=['3rdparty:ext1'], sources=[])",
    )
    rule_runner.add_to_build_file(
        "src/python/foo/bar/qux",
        "python_library(dependencies=['3rdparty:ext2', 'src/python/foo/bar/baz'], sources=[])",
    )
    # The distribution ignores ext2 via the `!!` transitive-exclude syntax.
    rule_runner.add_to_build_file(
        "src/python/foo/bar",
        textwrap.dedent(
            """
            python_distribution(
                name='bar-dist',
                dependencies=['!!3rdparty:ext2',':bar'],
                provides=setup_py(name='bar', version='9.8.7'),
            )
            python_library(
                sources=[],
                dependencies=['src/python/foo/bar/baz', 'src/python/foo/bar/qux'],
            )
            """
        ),
    )

    # Only ext1 survives: ext2 is excluded, ext3 was never depended upon.
    assert_requirements(
        rule_runner, ["ext1==1.22.333"], Address("src/python/foo/bar", target_name="bar-dist")
    )
def rule_runner() -> RuleRunner:
    """RuleRunner fixture for resolving Google Cloud Function handlers and
    injecting their inferred dependencies."""
    handler_queries = (
        QueryRule(ResolvedPythonGoogleHandler, [ResolvePythonGoogleHandlerRequest]),
        QueryRule(InjectedDependencies, [InjectPythonCloudFunctionHandlerDependency]),
    )
    supported_targets = [
        FileTarget,
        PythonGoogleCloudFunction,
        PythonRequirementTarget,
        PythonSourcesGeneratorTarget,
    ]
    return RuleRunner(
        rules=[*target_type_rules(), *python_target_types_rules(), *handler_queries],
        target_types=supported_targets,
    )
def test_strict_equals() -> None:
    """Params of different types must not collide even when `==` says they do."""
    runner = RuleRunner(rules=[boolean_and_int, QueryRule(A, [int, bool])])
    # With the default implementation of `__eq__` for boolean and int, `1 == True`. But in the
    # engine, that behavior would be surprising and would cause both of these Params to intern
    # to the same value, triggering an error. Instead, the engine additionally includes the
    # type of a value in equality.
    result = runner.request(A, [1, True])
    assert result == A()
def rule_runner() -> RuleRunner:
    """RuleRunner fixture for the `python_requirements` target generator,
    exposing its generated-target parametrizations by address."""
    generator_rules = [
        *python_requirements.rules(),
        QueryRule(_TargetParametrizations, [Address]),
    ]
    return RuleRunner(
        rules=generator_rules,
        target_types=[PythonRequirementsTargetGenerator],
    )
def rule_runner() -> RuleRunner:
    """RuleRunner fixture for the Pipenv requirements target generator,
    exposing its generated-target parametrizations by address."""
    generator_rules = (
        *pipenv_requirements.rules(),
        QueryRule(_TargetParametrizations, [Address]),
    )
    return RuleRunner(
        rules=generator_rules,
        target_types=[PipenvRequirementsTargetGenerator],
    )
def test_nonexistent_root(self, tmp_path: Path) -> None:
    """Scheduler construction fails when a queried type has no producing rule."""
    # No rules are available to compute A.
    with pytest.raises(ValueError) as exc_info:
        self.scheduler(tmp_path, [QueryRule(A, [B])], include_trace_on_error=False)

    expected_fragment = (
        "No installed rules return the type A, and it was not provided by potential callers of "
    )
    assert expected_fragment in str(exc_info.value)
def rule_runner() -> RuleRunner:
    """RuleRunner fixture for Coursier resolve/fetch tests: resolves artifact
    requirements to lockfiles, fetches classpath entries from lockfile
    entries, and can extract file digests for inspection."""
    runner = RuleRunner(
        rules=[
            *config_files.rules(),
            *coursier_fetch_rules(),
            *coursier_setup_rules(),
            *external_tool_rules(),
            *source_files.rules(),
            *util_rules(),
            QueryRule(CoursierResolvedLockfile, (ArtifactRequirements,)),
            QueryRule(ClasspathEntry, (CoursierLockfileEntry,)),
            QueryRule(FileDigest, (ExtractFileDigest,)),
        ],
        target_types=[JvmDependencyLockfile, JvmArtifact],
    )
    # Coursier needs the Python bootstrap environment to locate interpreters.
    runner.set_options(args=[], env_inherit=PYTHON_BOOTSTRAP_ENV)
    return runner
def rule_runner() -> RuleRunner:
    """RuleRunner fixture that turns a PexFromTargetsRequest into a PexRequest."""
    pex_query = QueryRule(PexRequest, (PexFromTargetsRequest,))
    return RuleRunner(
        rules=[*pex_from_targets.rules(), pex_query],
        target_types=[PythonLibrary, PythonRequirementLibrary],
    )
def rule_runner() -> RuleRunner:
    """RuleRunner fixture that turns a PexFromTargetsRequest into a PexRequest."""
    pex_query = QueryRule(PexRequest, (PexFromTargetsRequest,))
    return RuleRunner(
        rules=[*pex_from_targets.rules(), pex_query],
        target_types=[PythonSourcesGeneratorTarget, PythonRequirementTarget],
    )
def rule_runner() -> RuleRunner:
    """RuleRunner fixture for resolving AWS Lambda handlers and injecting
    their inferred dependencies."""
    handler_queries = (
        QueryRule(ResolvedPythonAwsHandler, [ResolvePythonAwsHandlerRequest]),
        QueryRule(InjectedDependencies, [InjectPythonLambdaHandlerDependency]),
    )
    supported_targets = [
        FileTarget,
        PythonAWSLambda,
        PythonRequirementTarget,
        PythonSourcesGeneratorTarget,
    ]
    return RuleRunner(
        rules=[*target_type_rules(), *python_target_types_rules(), *handler_queries],
        target_types=supported_targets,
    )
def rule_runner() -> RuleRunner:
    """RuleRunner fixture exposing the `pipenv_requirements` BUILD macro and a
    Targets-by-Specs query."""
    macros = {"pipenv_requirements": PipenvRequirements}
    return RuleRunner(
        rules=[QueryRule(Targets, (Specs,))],
        target_types=[PythonRequirementLibrary, PythonRequirementsFile],
        context_aware_object_factories=macros,
    )
def exporting_owner_rule_runner() -> RuleRunner:
    """setup_py rule runner that can look up the ExportedTarget owning a given
    OwnedDependency."""
    wired_rules = [
        get_exporting_owner,
        *target_types_rules.rules(),
        QueryRule(ExportedTarget, (OwnedDependency,)),
    ]
    return create_setup_py_rule_runner(rules=wired_rules)
def rule_runner() -> RuleRunner:
    """RuleRunner fixture for the Scala source dependency analyzer, with debug
    logging enabled to aid parser troubleshooting."""
    analysis_queries = (
        QueryRule(AnalyzeScalaSourceRequest, (SourceFilesRequest,)),
        QueryRule(ScalaSourceDependencyAnalysis, (AnalyzeScalaSourceRequest,)),
    )
    runner = RuleRunner(
        rules=[
            *scala_parser.rules(),
            *jvm_tool.rules(),
            *source_files.rules(),
            *jdk_rules.rules(),
            *target_types.rules(),
            *jvm_util_rules.rules(),
            *analysis_queries,
        ],
        target_types=[ScalaSourceTarget],
    )
    runner.set_options(args=["-ldebug"], env_inherit=PYTHON_BOOTSTRAP_ENV)
    return runner
def test_streaming_workunits_reporting(self):
    """Each executed named @rule yields exactly one finished workunit."""
    scheduler, tracker, handler = self._fixture_for_rules([fib, QueryRule(Fib, (int,))])
    with handler:
        scheduler.product_request(Fib, subjects=[0])
    flattened = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
    # The execution of the single named @rule "fib" should be providing this one workunit.
    assert len(flattened) == 1

    scheduler, tracker, handler = self._fixture_for_rules([fib, QueryRule(Fib, (int,))])
    with handler:
        scheduler.product_request(Fib, subjects=[10])

    # Requesting a bigger fibonacci number will result in more rule executions and thus
    # more reported workunits. In this case, we expect 11 invocations of the `fib` rule.
    flattened = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
    assert len(flattened) == 11
    assert tracker.finished
def rule_runner() -> RuleRunner:
    """RuleRunner fixture for MyPy tests: config-file discovery, first-party
    plugin resolution, and lockfile generation."""
    mypy_queries = (
        QueryRule(MyPyConfigFile, []),
        QueryRule(MyPyFirstPartyPlugins, []),
        QueryRule(PythonLockfileRequest, [MyPyLockfileSentinel]),
    )
    return RuleRunner(
        rules=[
            *subsystem.rules(),
            *skip_field.rules(),
            *config_files.rules(),
            *python_sources.rules(),
            *target_types_rules.rules(),
            *mypy_queries,
        ],
        target_types=[PythonSourcesGeneratorTarget, PythonRequirementTarget, GenericTarget],
    )
def test_no_include_trace_error_raises_boring_error(self):
    """With tracing disabled, a failing rule produces a terse one-line error."""
    scheduler = self.scheduler(
        [nested_raise, QueryRule(A, (B,))], include_trace_on_error=False
    )

    with self.assertRaises(ExecutionError) as cm:
        list(scheduler.product_request(A, subjects=[B()]))

    self.assert_equal_with_printing(
        "1 Exception encountered:\n\n Exception: An exception for B\n",
        str(cm.exception),
    )
def test_jvm_tool_base_extracts_correct_coordinates() -> None:
    """A JVM tool lockfile request must resolve the tool's artifact option to
    coordinates, including transitive dependencies (junit pulls hamcrest-core)."""
    rule_runner = RuleRunner(
        rules=[
            *config_files.rules(),
            *coursier_fetch_rules(),
            *coursier_setup_rules(),
            *external_tool_rules(),
            *source_files.rules(),
            *util_rules(),
            *jvm_tool.rules(),
            *lockfile_rules(),
            generate_test_tool_lockfile_request,
            SubsystemRule(MockJvmTool),
            QueryRule(GenerateJvmLockfile, (MockJvmToolLockfileSentinel,)),
            QueryRule(DigestContents, (Digest,)),
        ],
        target_types=[JvmArtifactTarget],
    )
    # Point the mock tool at the jvm_artifact target defined below and use a
    # throwaway lockfile path.
    rule_runner.set_options(
        args=[
            "--mock-tool-artifacts=//:junit_junit",
            "--mock-tool-lockfile=/dev/null",
        ],
        env_inherit=PYTHON_BOOTSTRAP_ENV,
    )
    rule_runner.write_files(
        {
            "BUILD": textwrap.dedent(
                """\
                jvm_artifact(
                    name="junit_junit",
                    group="junit",
                    artifact="junit",
                    version="4.13.2",
                )
                """
            )
        }
    )
    lockfile_request = rule_runner.request(GenerateJvmLockfile, [MockJvmToolLockfileSentinel()])
    coordinates = sorted(i.coordinate for i in lockfile_request.artifacts)
    # junit itself plus its transitive hamcrest-core dependency.
    assert coordinates == [
        Coordinate(group="junit", artifact="junit", version="4.13.2"),
        Coordinate(group="org.hamcrest", artifact="hamcrest-core", version="1.3"),
    ]
def rule_runner() -> RuleRunner:
    """RuleRunner fixture for generating user JVM resolve lockfiles."""
    lockfile_queries = (
        QueryRule(UserGenerateLockfiles, [RequestedJVMUserResolveNames]),
        QueryRule(GenerateLockfileResult, [GenerateJvmLockfile]),
    )
    runner = RuleRunner(
        rules=[
            *coursier_fetch_rules(),
            *lockfile.rules(),
            *coursier_setup_rules(),
            *external_tool_rules(),
            *source_files.rules(),
            *util_rules(),
            *lockfile_queries,
        ],
        target_types=[JvmArtifactTarget],
        objects={"parametrize": Parametrize},
    )
    runner.set_options([], env_inherit={"PATH"})
    return runner
def rule_runner() -> RuleRunner:
    """RuleRunner fixture for running Python tests via pytest, including
    coverage config, packaging, and debug-adapter support."""
    test_queries = (
        QueryRule(TestResult, (PythonTestFieldSet,)),
        QueryRule(TestDebugRequest, (PythonTestFieldSet,)),
    )
    return RuleRunner(
        rules=[
            create_coverage_config,
            *pytest_runner.rules(),
            *pex_from_targets.rules(),
            *dependency_inference_rules.rules(),  # For conftest detection.
            *distdir.rules(),
            *binary.rules(),
            *package_pex_binary.rules(),
            get_filtered_environment,
            resolve_pex_entry_point,
            *test_queries,
        ],
        target_types=[PexBinary, PythonLibrary, PythonTests, PythonRequirementLibrary],
    )
def rule_runner() -> RuleRunner:
    """RuleRunner fixture combining Black and isort under the Python fmt
    language rules."""
    fmt_rules = [
        enrich_fmt_result,
        format_python_target,
        *black_rules(),
        *isort_rules(),
        QueryRule(LanguageFmtResults, (PythonFmtTargets,)),
    ]
    return RuleRunner(rules=fmt_rules)
def rule_runner() -> RuleRunner:
    """RuleRunner fixture for file-digest extraction tests.

    Temp dirs are preserved to ease debugging of failed digest assertions.
    """
    digest_query = QueryRule(FileDigest, (ExtractFileDigest,))
    return RuleRunner(
        preserve_tmpdirs=True,
        rules=[*util_rules(), digest_query],
    )
def rule_runner() -> RuleRunner:
    """RuleRunner fixture for running Pylint over Python targets."""
    lint_query = QueryRule(LintResults, (PylintRequest,))
    return RuleRunner(
        rules=[*pylint_rules(), lint_query],
        target_types=[PythonLibrary, PythonRequirementLibrary],
    )
def rule_runner() -> RuleRunner:
    """Minimal RuleRunner fixture for packaging Python AWS Lambdas."""
    package_query = QueryRule(BuiltPackage, (PythonAwsLambdaFieldSet,))
    return RuleRunner(
        rules=[*awslambda_python_rules(), *target_rules(), package_query],
        target_types=[PythonAWSLambda, PythonLibrary],
    )