def test_cache_scope_never(rule_runner: RuleRunner) -> None:
    """A NEVER-scoped process must be re-executed in every new session."""
    proc = Process(
        argv=("/bin/bash", "-c", "echo $RANDOM"),
        cache_scope=ProcessCacheScope.NEVER,
        description="random",
    )
    first = rule_runner.request(FallibleProcessResult, [proc])
    rule_runner.new_session("next attempt")
    second = rule_runner.request(FallibleProcessResult, [proc])
    # The new session must trigger a re-run, so the two $RANDOM outputs differ.
    assert first.stdout != second.stdout
def test_cache_scope_always(rule_runner: RuleRunner) -> None:
    """An ALWAYS-scoped process is cached even when it fails and across sessions."""
    # Should not re-run on failure, even in a new Session.
    proc = Process(
        argv=("/bin/bash", "-c", "echo $RANDOM && exit 1"),
        cache_scope=ProcessCacheScope.ALWAYS,
        description="failure",
    )
    first = rule_runner.request(FallibleProcessResult, [proc])
    rule_runner.new_session("session two")
    second = rule_runner.request(FallibleProcessResult, [proc])
    # Identity (not just equality): the memoized result object is reused.
    assert first is second
def test_cache_scope_per_session(rule_runner: RuleRunner) -> None:
    """PER_SESSION results are memoized within a session but invalidated across sessions."""
    proc = Process(
        argv=("/bin/bash", "-c", "echo $RANDOM"),
        cache_scope=ProcessCacheScope.PER_SESSION,
        description="random",
    )
    first = rule_runner.request(FallibleProcessResult, [proc])
    second = rule_runner.request(FallibleProcessResult, [proc])
    # Same session: the memoized result object itself is returned.
    assert first is second
    rule_runner.new_session("next attempt")
    third = rule_runner.request(FallibleProcessResult, [proc])
    # Should re-run in a new Session.
    assert first != third
def test_cache_scope_successful(rule_runner: RuleRunner) -> None:
    """SUCCESSFUL caches passing runs across sessions; failed runs only within one."""
    # Should not re-run on success, even in a new Session.
    ok_proc = Process(
        argv=("/bin/bash", "-c", "echo $RANDOM"),
        cache_scope=ProcessCacheScope.SUCCESSFUL,
        description="success",
    )
    ok_first = rule_runner.request(FallibleProcessResult, [ok_proc])
    rule_runner.new_session("session one")
    ok_second = rule_runner.request(FallibleProcessResult, [ok_proc])
    assert ok_first is ok_second

    # Should re-run on failure, but only in a new Session.
    fail_proc = Process(
        argv=("/bin/bash", "-c", "echo $RANDOM && exit 1"),
        cache_scope=ProcessCacheScope.SUCCESSFUL,
        description="failure",
    )
    fail_first = rule_runner.request(FallibleProcessResult, [fail_proc])
    fail_second = rule_runner.request(FallibleProcessResult, [fail_proc])
    rule_runner.new_session("session two")
    fail_third = rule_runner.request(FallibleProcessResult, [fail_proc])
    # Within one session the failed result is memoized; the new session re-runs it.
    assert fail_first is fail_second
    assert fail_second != fail_third
def test_worktree_invalidation(origin: Path) -> None:
    """Requesting the worktree in two sessions yields distinct instances and reruns rules."""
    with pushd(origin.as_posix()):
        init_repo("origin", origin)

        @rule
        async def worktree_id_string() -> str:
            # Surface the worktree's object identity so the test can detect reuse.
            got = await Get(MaybeGitWorktree, GitWorktreeRequest())
            return str(id(got))

        runner = RuleRunner(
            rules=[
                worktree_id_string,
                QueryRule(str, []),
            ]
        )
        runner.set_options([], env_inherit={"PATH"})
        first_id = runner.request(str, [])
        runner.new_session("second-session")
        runner.set_options([], env_inherit={"PATH"})
        second_id = runner.request(str, [])
        # A new session must not hand back the previously cached worktree instance.
        assert first_id != second_id
def test_pex_working_directory(rule_runner: RuleRunner, pex_type: type[Pex | VenvPex]) -> None:
    """Check that a PEX process honors its requested working directory.

    Builds a PEX whose entry point prints its cwd and every directory reachable from
    it, then runs it once per candidate working directory, asserting that the reported
    cwd and the materialized input directories match expectations. For ``VenvPex``,
    additionally runs the process twice with the venv cache cleared in between, to
    emulate a PEX-creation cache hit combined with a venv-seeding cache miss.
    """
    named_caches_dir = rule_runner.request(GlobalOptions, []).named_caches_dir
    sources = rule_runner.request(
        Digest,
        [
            CreateDigest(
                (
                    FileContent(
                        path="main.py",
                        content=textwrap.dedent(
                            """
                            import os
                            cwd = os.getcwd()
                            print(f"CWD: {cwd}")
                            for path, dirs, _ in os.walk(cwd):
                                for name in dirs:
                                    print(f"DIR: {os.path.relpath(os.path.join(path, name), cwd)}")
                            """
                        ).encode(),
                    ),
                )
            ),
        ],
    )
    pex_data = create_pex_and_get_all_data(
        rule_runner,
        pex_type=pex_type,
        main=EntryPoint("main"),
        sources=sources,
        interpreter_constraints=InterpreterConstraints(["CPython>=3.6"]),
    )
    pex_process_type = PexProcess if isinstance(pex_data.pex, Pex) else VenvPexProcess
    dirpath = "foo/bar/baz"
    runtime_files = rule_runner.request(Digest, [CreateDigest([Directory(path=dirpath)])])
    # NOTE(review): os.path.split only separates the final component, producing
    # ("foo/bar", "baz") — i.e. two loop iterations, not one per path segment. If a run
    # per segment was intended, this should split on every separator; confirm intent.
    dirpath_parts = os.path.split(dirpath)
    for i in range(len(dirpath_parts)):
        # Prefix of the parts is the working directory (None for the root case);
        # the remaining suffix is the directory the process should see beneath cwd.
        working_dir = os.path.join(*dirpath_parts[:i]) if i > 0 else None
        # i < len(dirpath_parts) always holds inside the loop, so the suffix join is
        # unconditional (the original's `else None` branch was unreachable).
        expected_subdir = os.path.join(*dirpath_parts[i:])
        process = rule_runner.request(
            Process,
            [
                pex_process_type(
                    pex_data.pex,
                    description="Run the pex and check its cwd",
                    working_directory=working_dir,
                    input_digest=runtime_files,
                    # We skip the process cache for this PEX to ensure that it re-runs.
                    cache_scope=ProcessCacheScope.PER_SESSION,
                )
            ],
        )

        # For VenvPexes, run the PEX twice while clearing the venv dir in between. This
        # emulates situations where a PEX creation hits the process cache, while venv
        # seeding misses the PEX cache.
        if isinstance(pex_data.pex, VenvPex):
            # Request once to ensure that the directory is seeded, and then start a new
            # session so that the second run happens as well.
            _ = rule_runner.request(ProcessResult, [process])
            rule_runner.new_session("re-run-for-venv-pex")
            rule_runner.set_options(
                ["--backend-packages=pants.backend.python"],
                env_inherit={"PATH", "PYENV_ROOT", "HOME"},
            )
            # Clear the venv cache so the second run is forced to re-seed it.
            venv_dir = os.path.join(named_caches_dir, "pex_root", pex_data.pex.venv_rel_dir)
            assert os.path.isdir(venv_dir)
            safe_rmtree(venv_dir)

        result = rule_runner.request(ProcessResult, [process])
        output_str = result.stdout.decode()
        mo = re.search(r"CWD: (.*)\n", output_str)
        assert mo is not None
        reported_cwd = mo.group(1)
        if working_dir:
            assert reported_cwd.endswith(working_dir)
        if expected_subdir:
            assert f"DIR: {expected_subdir}" in output_str