# Verifies that the prelude-evaluation rule rejects prelude files containing
# `import` statements: the mocked FilesContent get returns a prelude whose text
# begins with `import os`, and the rule must raise with the "banned" message
# asserted below. (Line kept byte-identical: the dedent()'d prelude literal's
# original internal line breaks are not recoverable from this collapsed view.)
def test_illegal_import(self) -> None: prelude = dedent( """\ import os def make_target(): python_library() """ ).encode() address_mapper = unittest.mock.Mock() address_mapper.prelude_glob_patterns = ("prelude",) with self.assertRaisesRegex( Exception, "Import used in /dev/null/prelude at line 1\\. Import statements are banned" ): run_rule( evaluate_preludes, rule_args=[address_mapper,], mock_gets=[ MockGet( product_type=Snapshot, subject_type=PathGlobs, mock=lambda _: Snapshot(Digest("abc", 10), ("/dev/null/prelude",), ()), ), MockGet( product_type=FilesContent, subject_type=Digest, mock=lambda _: FilesContent( [FileContent(path="/dev/null/prelude", content=prelude)] ), ), ], )
def get_coverage_plugin_input() -> InputFilesContent:
    """Package the coverage plugin's source as input files for a sandboxed process.

    The plugin is loaded from this package's resources and exposed under the
    pants-internal module name so the coverage run can import it.
    """
    plugin_source = pkg_resources.resource_string(__name__, "coverage_plugin/plugin.py")
    plugin_file = FileContent(
        path=f"{COVERAGE_PLUGIN_MODULE_NAME}.py",
        content=plugin_source,
    )
    return InputFilesContent(FilesContent((plugin_file,)))
def test_good_prelude(self) -> None:
    """A well-formed prelude file is evaluated and its symbols become callable."""
    address_mapper = unittest.mock.Mock()
    address_mapper.prelude_glob_patterns = ("prelude",)
    address_mapper.build_file_imports_behavior = BuildFileImportsBehavior.error
    # NOTE(review): renamed the misspelled `evalute_preludes` to `evaluate_preludes`,
    # matching the spelling used by the sibling tests in this file — confirm against
    # the rule's actual definition before landing.
    symbols = run_rule(
        evaluate_preludes,
        rule_args=[address_mapper],
        mock_gets=[
            MockGet(
                product_type=Snapshot,
                subject_type=PathGlobs,
                mock=lambda _: Snapshot(Digest("abc", 10), ("/dev/null/prelude",), ()),
            ),
            MockGet(
                product_type=FilesContent,
                subject_type=Digest,
                mock=lambda _: FilesContent(
                    [FileContent(path="/dev/null/prelude", content=b"def foo(): return 1")]
                ),
            ),
        ],
    )
    # The prelude's `foo` should be exposed and behave as defined.
    assert symbols.symbols["foo"]() == 1
def test_syntax_error(self) -> None:
    """A prelude that fails to evaluate surfaces a parse error naming the file."""
    address_mapper = unittest.mock.Mock()
    address_mapper.prelude_glob_patterns = ("prelude",)
    snapshot_get = MockGet(
        product_type=Snapshot,
        subject_type=PathGlobs,
        mock=lambda _: Snapshot(Digest("abc", 10), ("/dev/null/prelude",), ()),
    )
    # The prelude body `blah` evaluates to an undefined name, which the rule is
    # expected to report as a parse error for this file.
    content_get = MockGet(
        product_type=FilesContent,
        subject_type=Digest,
        mock=lambda _: FilesContent([FileContent(path="/dev/null/prelude", content=b"blah")]),
    )
    with self.assertRaisesRegex(
        Exception,
        "Error parsing prelude file /dev/null/prelude: name 'blah' is not defined",
    ):
        run_rule(
            evaluate_preludes,
            rule_args=[address_mapper],
            mock_gets=[snapshot_get, content_get],
        )
def test_empty(self) -> None:
    """Test that parsing an empty BUILD file results in an empty AddressFamily."""
    mapper = AddressMapper(
        parser=JsonParser(TEST_TABLE),
        prelude_glob_patterns=(),
        build_file_imports_behavior=BuildFileImportsBehavior.error,
    )
    snapshot_get = MockGet(
        product_type=Snapshot,
        subject_type=PathGlobs,
        mock=lambda _: Snapshot(Digest("abc", 10), ("/dev/null/BUILD",), ()),
    )
    # An empty BUILD file: zero bytes of content.
    content_get = MockGet(
        product_type=FilesContent,
        subject_type=Digest,
        mock=lambda _: FilesContent([FileContent(path="/dev/null/BUILD", content=b"")]),
    )
    family = run_rule(
        parse_address_family,
        rule_args=[mapper, BuildFilePreludeSymbols(FrozenDict()), Dir("/dev/null")],
        mock_gets=[snapshot_get, content_get],
    )
    self.assertEqual(len(family.objects_by_name), 0)
def test_empty(self):
    """Test that parsing an empty BUILD file results in an empty AddressFamily."""
    address_mapper = AddressMapper(JsonParser(TestTable()))
    af = run_rule(parse_address_family, address_mapper, Dir('/dev/null'), {
        (FilesContent, PathGlobs): lambda _: FilesContent([FileContent('/dev/null/BUILD', '')])
    })
    # `assertEquals` is a deprecated alias (removed in Python 3.12); use
    # `assertEqual`, matching the other tests in this file.
    self.assertEqual(len(af.objects_by_name), 0)
def test_empty(self):
    """Test that parsing an empty BUILD file results in an empty AddressFamily."""
    mapper = AddressMapper(JsonParser(TestTable()))
    # Mock out the two intrinsic gets: the BUILD-file snapshot and its contents.
    mocked_gets = {
        (Snapshot, PathGlobs): lambda _: Snapshot(DirectoryDigest('abc', 10), (File('/dev/null/BUILD'),)),
        (FilesContent, DirectoryDigest): lambda _: FilesContent([FileContent('/dev/null/BUILD', b'')]),
    }
    family = run_rule(parse_address_family, mapper, Dir('/dev/null'), mocked_gets)
    self.assertEqual(len(family.objects_by_name), 0)
def test_non_archive(self) -> None:
    """A file that is not an archive passes through extraction unchanged."""
    script = b"# A shell script"
    snapshot = self.make_snapshot({"test.sh": script})
    extracted = self.request_single_product(
        ExtractedDigest, Params(MaybeExtractable(snapshot.digest))
    )
    result = self.request_single_product(FilesContent, Params(extracted.digest))
    assert result == FilesContent([FileContent("test.sh", script)])
def run_fmt_rule(
    *,
    targets: List[HydratedTarget],
    mock_formatter: Callable[[PythonTargetAdaptor], FmtResult],
) -> Tuple[Fmt, MockConsole]:
    """Drive the `fmt` rule with a mocked per-target formatter and capture its console."""
    console = MockConsole(use_colors=False)
    membership = UnionMembership(union_rules={TargetWithSources: [PythonTargetAdaptor]})
    fmt_result: Fmt = run_rule(
        fmt,
        rule_args=[console, HydratedTargets(targets), membership],
        mock_gets=[
            MockGet(product_type=FmtResult, subject_type=PythonTargetAdaptor, mock=mock_formatter),
            # No materialized file content is needed for these tests.
            MockGet(product_type=FilesContent, subject_type=Digest, mock=lambda _: FilesContent([])),
        ],
    )
    return fmt_result, console
def test_empty(self) -> None:
    """Test that parsing an empty BUILD file results in an empty AddressFamily."""
    mapper = AddressMapper(JsonParser(TEST_TABLE))
    snapshot_get = MockGet(
        product_type=Snapshot,
        subject_type=PathGlobs,
        mock=lambda _: Snapshot(Digest("abc", 10), ("/dev/null/BUILD",), ()),
    )
    # The BUILD file exists but is empty, so no objects should be parsed.
    content_get = MockGet(
        product_type=FilesContent,
        subject_type=Digest,
        mock=lambda _: FilesContent([FileContent(path="/dev/null/BUILD", content=b"")]),
    )
    family = run_rule(
        parse_address_family,
        rule_args=[mapper, Dir("/dev/null")],
        mock_gets=[snapshot_get, content_get],
    )
    self.assertEqual(len(family.objects_by_name), 0)
def test_write_file(self):
    """A process that writes an output file has that file captured in its output digest."""
    request = Process(
        argv=("/bin/bash", "-c", "echo -n 'European Burmese' > roland"),
        description="echo roland",
        output_files=("roland",),
    )
    result = self.request_single_product(ProcessResult, request)
    expected_digest = Digest(
        fingerprint="63949aa823baf765eff07b946050d76ec0033144c785a94d3ebd82baa931cd16",
        serialized_bytes_length=80,
    )
    self.assertEqual(result.output_digest, expected_digest)
    contents = self.request_single_product(FilesContent, result.output_digest)
    # Third positional FileContent arg is presumably an is-executable flag — confirm.
    assert contents == FilesContent([FileContent("roland", b"European Burmese", False)])
# Exercises the `list_backends` goal end to end against three synthetic
# `register.py` files: one with only a V1 entry-point (`rules` takes an arg, so
# it does not count as V2), one with both V1 and V2 entry-points, and one
# V2-only core backend. The mocked gets feed these files in, and the test pins
# the exact formatted console output. (Lines kept byte-identical: the dedent()'d
# literals' original internal line breaks are not recoverable from this
# collapsed view, and they are asserted against verbatim.)
def test_list_backends() -> None: # NB: Here, we assume that the code to find all the `register.py`s is valid. Instead, the focus # is on us being able to correctly extract all the relevant information from those # `register.py` files and then to format the information. all_register_pys = FilesContent([ FileContent( "src/python/pants/backend/fortran/register.py", dedent('''\ """Support for Fortran 98.""" # V1 entry-point def register_goals(): pass # This naively looks like a V2 entry-point, but it's not! def rules(x: int): pass ''').encode(), ), FileContent( "contrib/elixir/src/python/pants/contrib/elixir/register.py", dedent("""\ # V1 entry-point def register_goals(): pass # V2 entry-point def rules(): pass """).encode(), ), FileContent( "src/python/pants/core/register.py", dedent('''\ """Core V2 rules. These are always activated. """ def rules(): pass ''').encode(), ), ]) console = MockConsole(use_colors=False) run_rule( list_backends, rule_args=[ create_goal_subsystem(BackendsOptions, sep="\\n", output_file=None), global_subsystem_instance(SourceRootConfig), global_subsystem_instance(GlobalOptions), console, ], mock_gets=[ MockGet(product_type=Snapshot, subject_type=PathGlobs, mock=lambda _: EMPTY_SNAPSHOT), MockGet(product_type=FilesContent, subject_type=Digest, mock=lambda _: all_register_pys), ], ) assert console.stdout.getvalue() == dedent("""\ V1 backends ----------- To enable V1 backends, add the backend to `backend_packages.add` in your `pants.toml`, like this: [GLOBAL] backend_packages.add = ["pants.backend.python"] In the below list, all activated backends end with `*`. pants.backend.fortran Support for Fortran 98. pants.contrib.elixir <no description> V2 backends ----------- To enable V2 backends, add the backend to `backend_packages2.add` in your `pants.toml`, like this: [GLOBAL] backend_packages2.add = ["pants.backend.python"] In the below list, all activated backends end with `*`. pants.contrib.elixir <no description> pants.core* Core V2 rules. 
These are always activated. """)
# Step 4: materializing the report on disk.
#
# The `run_tests` rule exposes the report to the user. Now we have a directory
# full of html or an xml file full of coverage data and we want to expose it to
# the user. This step happens in `test.py` and should handle all kinds of
# coverage reports, not just pytest coverage. The test runner grabs all our
# individual test results and requests a CoverageReport, and once it has one,
# it writes it down in `dist/coverage` (or wherever the user has configured it.)

COVERAGE_PLUGIN_MODULE_NAME = "__pants_coverage_plugin__"

# The coverage plugin's source, packaged so it can be injected into a process.
COVERAGE_PLUGIN_INPUT = InputFilesContent(
    FilesContent(
        (
            FileContent(
                path=f"{COVERAGE_PLUGIN_MODULE_NAME}.py",
                content=pkg_resources.resource_string(__name__, "coverage_plugin/plugin.py"),
            ),
        )
    )
)


@dataclass(frozen=True)
class PytestCoverageData(CoverageData):
    """Raw coverage data produced by one pytest run, keyed by target address."""

    address: Address
    digest: Digest


class PytestCoverageDataCollection(CoverageDataCollection):
    # Declares the element type so the engine can aggregate PytestCoverageData.
    element_type = PytestCoverageData
def run_cloc(
    console: Console,
    options: CountLinesOfCode.Options,
    cloc_script: DownloadedClocScript,
    specs: Specs,
) -> CountLinesOfCode:
    """Runs the cloc perl script in an isolated process"""
    transitive = options.values.transitive
    ignored = options.values.ignored

    # Hydrate either the transitive closure or just the directly-matched targets.
    if transitive:
        targets = yield Get(TransitiveHydratedTargets, Specs, specs)
        all_target_adaptors = {t.adaptor for t in targets.closure}
    else:
        targets = yield Get(HydratedTargets, Specs, specs)
        all_target_adaptors = {t.adaptor for t in targets}

    # Collect every source digest plus the full set of source paths for cloc's input list.
    digests_to_merge = []
    source_paths: Set[str] = set()
    for adaptor in all_target_adaptors:
        sources = getattr(adaptor, 'sources', None)
        if sources is not None:
            digests_to_merge.append(sources.snapshot.directory_digest)
            for f in sources.snapshot.files:
                source_paths.add(str(f))

    input_files_filename = 'input_files.txt'
    report_filename = 'report.txt'
    ignore_filename = 'ignored.txt'
    file_content = '\n'.join(sorted(source_paths)).encode()
    input_file_list = InputFilesContent(
        FilesContent((FileContent(path=input_files_filename, content=file_content),))
    )
    input_file_digest = yield Get(Digest, InputFilesContent, input_file_list)
    digests_to_merge.extend([cloc_script.digest, input_file_digest])
    digest = yield Get(Digest, DirectoriesToMerge(directories=tuple(digests_to_merge)))

    cmd = (
        '/usr/bin/perl',
        cloc_script.script_path,
        '--skip-uniqueness',  # Skip the file uniqueness check.
        f'--ignored={ignore_filename}',  # Write the names and reasons of ignored files to this file.
        f'--report-file={report_filename}',  # Write the output to this file rather than stdout.
        f'--list-file={input_files_filename}',  # Read an exhaustive list of files to process from this file.
    )
    req = ExecuteProcessRequest(
        argv=cmd,
        input_files=digest,
        output_files=(report_filename, ignore_filename),
        description='cloc',
    )
    exec_result = yield Get(ExecuteProcessResult, ExecuteProcessRequest, req)
    files_content = yield Get(FilesContent, Digest, exec_result.output_directory_digest)

    file_outputs = {fc.path: fc.content.decode() for fc in files_content.dependencies}

    for line in file_outputs[report_filename].splitlines():
        console.print_stdout(line)

    if ignored:
        console.print_stdout("\nIgnored the following files:")
        for line in file_outputs[ignore_filename].splitlines():
            console.print_stdout(line)

    yield CountLinesOfCode(exit_code=0)
class ArchiveTest(TestBase):
    """Tests extraction of zip and tar archives (and pass-through of non-archives)."""

    files = {"foo": b"bar", "hello/world": b"Hello, World!"}
    expected_files_content = FilesContent(
        [FileContent(name, content) for name, content in files.items()]
    )

    @classmethod
    def rules(cls):
        return (*super().rules(), *archive_rules(), RootRule(Snapshot))

    # TODO: Figure out a way to run these tests without a TestBase subclass, and use
    # pytest.mark.parametrize.
    def _do_test_extract_zip(self, compression) -> None:
        buf = BytesIO()
        with zipfile.ZipFile(buf, "w", compression=compression) as zf:
            for name, content in self.files.items():
                zf.writestr(name, content)
        buf.flush()
        snapshot = self.make_snapshot({"test.zip": buf.getvalue()})
        extracted = self.request_single_product(
            ExtractedDigest, Params(MaybeExtractable(snapshot.digest))
        )
        result = self.request_single_product(FilesContent, Params(extracted.digest))
        assert self.expected_files_content == result

    def test_extract_zip_stored(self) -> None:
        self._do_test_extract_zip(zipfile.ZIP_STORED)

    def test_extract_zip_deflated(self) -> None:
        self._do_test_extract_zip(zipfile.ZIP_DEFLATED)

    # TODO: Figure out a way to run these tests without a TestBase subclass, and use
    # pytest.mark.parametrize.
    def _do_test_extract_tar(self, compression) -> None:
        buf = BytesIO()
        mode = f"w:{compression}" if compression else "w"
        with tarfile.open(mode=mode, fileobj=buf) as tf:
            for name, content in self.files.items():
                info = tarfile.TarInfo(name)
                info.size = len(content)
                tf.addfile(info, BytesIO(content))
        ext = f"tar.{compression}" if compression else "tar"
        snapshot = self.make_snapshot({f"test.{ext}": buf.getvalue()})
        extracted = self.request_single_product(
            ExtractedDigest, Params(MaybeExtractable(snapshot.digest))
        )
        result = self.request_single_product(FilesContent, Params(extracted.digest))
        assert self.expected_files_content == result

    def test_extract_tar(self) -> None:
        self._do_test_extract_tar("")

    def test_extract_tar_gz(self) -> None:
        self._do_test_extract_tar("gz")

    def test_extract_tar_bz2(self) -> None:
        self._do_test_extract_tar("bz2")

    def test_extract_tar_xz(self) -> None:
        self._do_test_extract_tar("xz")

    def test_non_archive(self) -> None:
        snapshot = self.make_snapshot({"test.sh": b"# A shell script"})
        extracted = self.request_single_product(
            ExtractedDigest, Params(MaybeExtractable(snapshot.digest))
        )
        result = self.request_single_product(FilesContent, Params(extracted.digest))
        assert FilesContent([FileContent("test.sh", b"# A shell script")]) == result