def test_source_checkout(tmpdir, datafiles, cli):
    """Source checkout fetches once, then serves from the CAS, then fails without any cache."""
    base = str(tmpdir)
    project_dir = os.path.join(base, "project")
    element_path = "elements"
    cache_dir = os.path.join(base, "cache")
    source_dir = os.path.join(cache_dir, "sources")
    cli.configure({"cachedir": cache_dir})
    target_dir = os.path.join(base, "target")

    repo = create_element_size("target.bst", project_dir, element_path, [], 100000)

    # First checkout: nothing is cached yet, so an implicit fetch must happen.
    res = cli.run(project=project_dir, args=["source", "checkout", "--directory", target_dir, "target.bst"])
    res.assert_success()
    assert "Fetching from" in res.stderr

    # Remove the upstream repo, the previous checkout and the staged sources:
    # checkout must now be served purely from the CAS, with no fetching.
    shutil.rmtree(repo.repo)
    shutil.rmtree(target_dir)
    shutil.rmtree(source_dir)
    res = cli.run(project=project_dir, args=["source", "checkout", "--directory", target_dir, "target.bst"])
    res.assert_success()
    assert "Fetching from" not in res.stderr

    # With the CAS gone as well (and the upstream repo already deleted),
    # there is nothing left to checkout from, so the task must fail.
    shutil.rmtree(target_dir)
    shutil.rmtree(os.path.join(cache_dir, "cas"))
    res = cli.run(project=project_dir, args=["source", "checkout", "--directory", target_dir, "target.bst"])
    res.assert_task_error(ErrorDomain.PLUGIN, None)
def test_workspace_open_no_source_push(tmpdir, datafiles, cli):
    """Sources of a workspaced element must not be pushed to the source cache."""
    base = str(tmpdir)
    project_dir = os.path.join(base, "project")
    element_path = "elements"
    cache_dir = os.path.join(base, "cache")
    share_dir = os.path.join(base, "share")
    workspace = os.path.join(cli.directory, "workspace")

    with create_artifact_share(share_dir) as share:
        cli.configure(
            {
                "cachedir": cache_dir,
                "scheduler": {"pushers": 1},
                "source-caches": {"servers": [{"url": share.repo, "push": True}]},
            }
        )

        # A regular build fetches the sources and pushes them to the share.
        create_element_size("target.bst", project_dir, element_path, [], 10000)
        res = cli.run(project=project_dir, args=["build", "target.bst"])
        res.assert_success()
        assert "Fetching from" in res.stderr
        assert "Pushed source" in res.stderr

        # Clear the local CAS, then open a workspace on the element.
        shutil.rmtree(os.path.join(cache_dir, "cas"))
        res = cli.run(project=project_dir, args=["workspace", "open", "target.bst", "--directory", workspace])
        res.assert_success()

        # Building the workspaced element must not push its sources.
        res = cli.run(project=project_dir, args=["build", "target.bst"])
        res.assert_success()
        assert "Pushed source" not in res.stderr
def test_keys_stable_over_targets(cli, datafiles):
    """Cache keys must not depend on the order or subset of targets shown."""
    root_element = "elements/key-stability/top-level.bst"
    target1 = "elements/key-stability/t1.bst"
    target2 = "elements/key-stability/t2.bst"
    project = str(datafiles)

    def show_keys(*targets):
        # Run `bst show` over the given targets and return name -> full key.
        result = cli.run(project=project, args=["show", "--format", "%{name}::%{full-key}", *targets])
        result.assert_success()
        return _parse_output_keys(result.output)

    all_cache_keys = show_keys(root_element)
    ordering1_cache_keys = show_keys(target1, target2)
    ordering2_cache_keys = show_keys(target2, target1)

    # Every element common to both orderings must have identical keys, and
    # those keys must match the full-graph run as well.
    elements = ordering1_cache_keys.keys()
    assert {key: ordering2_cache_keys[key] for key in elements} == ordering1_cache_keys
    assert {key: all_cache_keys[key] for key in elements} == ordering1_cache_keys
def test_staged_source_build(tmpdir, datafiles, cli):
    """Staged sources in the CAS are enough to build; removing refs forces a refetch."""
    project_dir = os.path.join(datafiles.dirname, datafiles.basename, "project")
    cachedir = os.path.join(str(tmpdir), "cache")
    element_path = "elements"
    source_protos = os.path.join(str(tmpdir), "cache", "source_protos")
    elementsources = os.path.join(str(tmpdir), "cache", "elementsources")
    source_dir = os.path.join(str(tmpdir), "cache", "sources")

    cli.configure({"cachedir": cachedir})
    create_element_size("target.bst", project_dir, element_path, [], 10000)

    with dummy_context() as context:
        context.cachedir = cachedir
        project = Project(project_dir, context)
        project.ensure_fully_loaded()
        element = project.load_elements(["import-dev.bst"])[0]

        # Nothing has been fetched yet, so the source must not be cached.
        element._query_source_cache()
        assert not element._cached_sources()

    res = cli.run(project=project_dir, args=["build", "target.bst"])
    res.assert_success()

    # Removing the artifact leaves the element buildable from cached sources.
    cli.remove_artifact_from_cache(project_dir, "target.bst")
    states = cli.get_element_states(project_dir, ["target.bst"])
    assert states["target.bst"] == "buildable"

    # Staged sources live in the CAS, so deleting the raw source dir
    # must not change the state.
    shutil.rmtree(source_dir)
    states = cli.get_element_states(project_dir, ["target.bst"])
    assert states["target.bst"] == "buildable"

    # Rebuilding must not fetch anything.
    res = cli.run(project=project_dir, args=["build", "target.bst"])
    res.assert_success()
    assert "Fetching from" not in res.stderr

    # The source directory must still contain no files, though empty
    # directories from staging etc. may exist.
    staged_files = []
    for _, _, names in os.walk(source_dir):
        staged_files.extend(names)
    assert staged_files == []

    # Dropping the source refs makes the element require a fetch again.
    shutil.rmtree(source_protos)
    shutil.rmtree(elementsources)
    cli.remove_artifact_from_cache(project_dir, "target.bst")
    states = cli.get_element_states(project_dir, ["target.bst"])
    assert states["target.bst"] == "fetch needed"

    # And building now does fetch from the source.
    res = cli.run(project=project_dir, args=["build", "target.bst"])
    res.assert_success()
    assert "Fetching from" in res.stderr
def test_source_cache_key(cli, datafiles):
    """Tracking after an upstream file change must produce a new source ref."""
    project_dir = str(datafiles)
    file_path = os.path.join(project_dir, "files")
    file_url = "file://" + file_path
    element_path = os.path.join(project_dir, "elements")
    element_name = "key_check.bst"

    element = {
        "kind": "import",
        "sources": [
            {
                "kind": "remote",
                "url": os.path.join(file_url, "bin-files", "usr", "bin", "hello"),
                "directory": "usr/bin",
            },
            {
                "kind": "remote",
                "url": os.path.join(file_url, "dev-files", "usr", "include", "pony.h"),
                "directory": "usr/include",
            },
            {"kind": "patch", "path": "files/hello-patch.diff"},
        ],
    }
    _yaml.roundtrip_dump(element, os.path.join(element_path, element_name))

    res = cli.run(project=project_dir, args=["source", "track", element_name])
    res.assert_success()
    res = cli.run(project=project_dir, args=["build", element_name])
    res.assert_success()

    # Exactly one source ref should exist so far.
    patch_protos = os.path.join(project_dir, "cache", "source_protos", "patch")
    assert len(os.listdir(patch_protos)) == 1

    # Append to a referenced file; tracking must discover a new revision.
    with open(os.path.join(file_path, "dev-files", "usr", "include", "pony.h"), "a") as f:
        f.write("\nappending nonsense")
    res = cli.run(project=project_dir, args=["source", "track", element_name])
    res.assert_success()
    assert "Found new revision: " in res.stderr

    res = cli.run(project=project_dir, args=["source", "fetch", element_name])
    res.assert_success()

    # A second source ref must now exist.
    assert len(os.listdir(patch_protos)) == 2
def test_variables_are_resolved(cli, tmpdir, datafiles):
    """Build and checkout; the expected file proves variables were resolved."""
    project = str(datafiles)
    checkoutdir = os.path.join(str(tmpdir), "checkout")

    # Build the target, then checkout its artifact.
    result = cli.run(project=project, args=["build", "target.bst"])
    result.assert_success()
    result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir])
    result.assert_success()

    # The checkout must contain the expected file.
    assert os.path.exists(os.path.join(checkoutdir, "file.txt"))
def test_compound_or_conditional(cli, datafiles, debug, logging, expected):
    """A compound OR condition resolves 'logging' from the two options."""
    project = os.path.join(datafiles.dirname, datafiles.basename, "compound-or-condition")

    # Show the resolved variables with both options set on the command line.
    cli_args = [
        "--option", "debug", debug,
        "--option", "logging", logging,
        "show", "--deps", "none", "--format", "%{vars}", "element.bst",
    ]
    result = cli.run(project=project, silent=True, args=cli_args)
    result.assert_success()

    loaded = _yaml.load_data(result.output)
    assert loaded.get_str("logging") == expected
def test_deep_references(cli, datafiles, maxvars):
    """Deeply chained variable references must resolve all the way down."""
    project = str(datafiles)

    # Build a chain var{maxvars} -> ... -> var0 where each variable expands
    # to the one below it, and var0 carries a recognizable test value which
    # we expect to see in the topmost variable in `bst show` output.
    topvar = "var{}".format(maxvars)
    bottomvar = "var0"
    testvalue = "testvalue {}".format(maxvars)

    variables = {"var{}".format(idx + 1): "%{var" + str(idx) + "}" for idx in range(maxvars)}
    variables[bottomvar] = testvalue
    element = {"kind": "manual", "variables": variables}
    _yaml.roundtrip_dump(element, os.path.join(project, "test.bst"))

    # Run `bst show` over the generated element.
    result = cli.run(project=project, args=["show", "--format", "%{vars}", "test.bst"])
    result.assert_success()

    # The topmost variable must have resolved to the bottom value.
    result_vars = _yaml.load_data(result.output)
    assert result_vars.get_str(topvar) == testvalue
def test_missing_certs(cli, datafiles, config_key, config_value):
    """A storage-service config referencing missing cert files must fail to load."""
    project = os.path.join(datafiles.dirname, datafiles.basename, "missing-certs")

    project_conf = {
        "name": "test",
        "remote-execution": {
            "execution-service": {"url": "http://localhost:8088"},
            "storage-service": {
                "url": "http://charactron:11001",
                config_key: config_value,
            },
        },
    }
    project_conf_file = os.path.join(project, "project.conf")
    _yaml.roundtrip_dump(project_conf, project_conf_file)

    # The invalid configuration is detected while loading the project, so a
    # plain `bst show` is enough to trigger the error here.
    result = cli.run(project=project, args=["show", "element.bst"])
    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA, "Your config is missing")
def test_assertion_cli(cli, datafiles, target, opt_pony, opt_horsy, assertion):
    """A user assertion in project data surfaces its message as a load error."""
    project = str(datafiles)
    cli_args = [
        "--option", "pony", opt_pony,
        "--option", "horsy", opt_horsy,
        "show", "--deps", "none", "--format", "%{vars}", target,
    ]
    result = cli.run(project=project, silent=True, args=cli_args)
    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.USER_ASSERTION)

    # The assertion text provided by the user must appear in the exception.
    assert assertion in str(result.exception)
def test_old_and_new_configs(cli, datafiles):
    """Mixing the old `url` key with the new service keys must be rejected."""
    project = os.path.join(datafiles.dirname, datafiles.basename, "missing-certs")

    project_conf = {
        "name": "test",
        "remote-execution": {
            "url": "https://cache.example.com:12345",
            "execution-service": {"url": "http://localhost:8088"},
            "storage-service": {"url": "http://charactron:11001"},
        },
    }
    project_conf_file = os.path.join(project, "project.conf")
    _yaml.roundtrip_dump(project_conf, project_conf_file)

    # Use `pull` here to ensure we try to initialize the remotes, which is
    # what triggers the error; a simple `bst show` would not do that.
    result = cli.run(project=project, args=["artifact", "pull", "element.bst"])
    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA, "specify one")
def test_use_of_protected_var_element_overrides(cli, datafiles, protected_var):
    """Overriding a protected variable via element overrides must fail to load."""
    project = str(datafiles)

    conf = {
        "name": "test",
        "min-version": "2.0",
        "elements": {"manual": {"variables": {protected_var: "some-value"}}},
    }
    _yaml.roundtrip_dump(conf, os.path.join(project, "project.conf"))

    element = {
        "kind": "manual",
        "sources": [{"kind": "local", "path": "foo.txt"}],
    }
    _yaml.roundtrip_dump(element, os.path.join(project, "target.bst"))

    result = cli.run(project=project, args=["build", "target.bst"])
    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROTECTED_VARIABLE_REDEFINED)
def test_source_staged(tmpdir, cli, datafiles):
    """After a build, the staged source in the cache matches the imported files."""
    project_dir = os.path.join(datafiles.dirname, datafiles.basename, "project")
    cachedir = os.path.join(str(tmpdir), "cache")
    cli.configure({"cachedir": cachedir})

    res = cli.run(project=project_dir, args=["build", "import-bin.bst"])
    res.assert_success()

    with dummy_context() as context:
        context.cachedir = cachedir
        # Load the project to get at the source cache and the CAS.
        project = Project(project_dir, context)
        project.ensure_fully_loaded()
        sourcecache = context.sourcecache
        cas = context.get_cascache()

        # Check the source ended up in the cache; loading the element again
        # is pretty messy but seems to be the only way to get at its sources.
        element = project.load_elements(["import-bin.bst"])[0]
        element._query_source_cache()
        source = list(element.sources())[0]
        assert element._cached_sources()
        assert sourcecache.contains(source)

        # Export the source from the CAS and compare it tree-for-tree with
        # the files that were originally imported.
        digest = sourcecache.export(source)._get_digest()
        extractdir = os.path.join(str(tmpdir), "extract")
        cas.checkout(extractdir, digest)
        exported = extractdir
        imported = os.path.join(project_dir, "files", "bin-files")
        assert list(relative_walk(exported)) == list(relative_walk(imported))
def test_invalid_variable_name(cli, datafiles, project_dir):
    """An invalid variable name in project data is reported as a load error."""
    project = os.path.join(datafiles.dirname, datafiles.basename, project_dir)
    result = cli.run(project=project, silent=True, args=["show", "element.bst"])
    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_SYMBOL_NAME)
def test_overlaps_whitelist_ignored(cli, datafiles):
    """A whitelist on a non-fatal project still builds successfully."""
    project_dir = str(datafiles)
    gen_project(project_dir, False)
    result = cli.run(project=project_dir, silent=True, args=["build", "collect-whitelisted.bst"])
    result.assert_success()
def test_overlaps(cli, datafiles, use_fatal_warnings):
    """Overlaps on a non-fatal project build successfully."""
    project_dir = str(datafiles)
    gen_project(project_dir, False, use_fatal_warnings)
    result = cli.run(project=project_dir, silent=True, args=["build", "collect.bst"])
    result.assert_success()
def test_invalid_value_config(cli, datafiles, config_option):
    """An invalid boolean option value in user config is a load error."""
    project = os.path.join(datafiles.dirname, datafiles.basename, "option-bool")
    cli.configure({"projects": {"test": {"options": {"pony": config_option}}}})
    result = cli.run(
        project=project,
        silent=True,
        args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
    )
    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
def test_overlap_subproject(cli, tmpdir, datafiles, project_policy, subproject_policy):
    """Overlap fatality across a junction follows the toplevel project's policy."""
    project_dir = str(datafiles)
    subproject_dir = os.path.join(project_dir, "sub-project")
    junction_path = os.path.join(project_dir, "sub-project.bst")

    gen_project(project_dir, project_policy == "fail", project_name="test")
    gen_project(subproject_dir, subproject_policy == "fail", project_name="subtest")
    generate_junction(tmpdir, subproject_dir, junction_path)

    # The dependency chain here guarantees the project element always
    # overlaps with the subproject element. Whether that overlap is an
    # error or a warning must always be controlled by the toplevel
    # project's policy and never by the subproject's.
    result = cli.run(project=project_dir, silent=True, args=["build", "sub-collect.bst"])
    if project_policy == "fail":
        result.assert_main_error(ErrorDomain.STREAM, None)
        result.assert_task_error(ErrorDomain.PLUGIN, CoreWarnings.OVERLAPS)
    else:
        result.assert_success()
        assert "WARNING [overlaps]" in result.stderr
def test_source_fetch(tmpdir, cli, datafiles):
    """A fetched source, exported from the CAS, matches the original files."""
    project_dir = os.path.join(datafiles.dirname, datafiles.basename, "project")
    cachedir = os.path.join(str(tmpdir), "cache")
    cli.configure({"cachedir": cachedir})

    res = cli.run(project=project_dir, args=["source", "fetch", "import-dev.bst"])
    res.assert_success()

    with dummy_context() as context:
        context.cachedir = cachedir
        # Load the project to get at the source cache and the CAS.
        project = Project(project_dir, context)
        project.ensure_fully_loaded()
        cas = context.get_cascache()
        sourcecache = context.sourcecache

        element = project.load_elements(["import-dev.bst"])[0]
        element._query_source_cache()
        source = list(element.sources())[0]
        assert element._cached_sources()

        # Export the fetched source and compare directory structures with
        # the files it was fetched from.
        digest = sourcecache.export(source)._get_digest()
        extractdir = os.path.join(str(tmpdir), "extract")
        cas.checkout(extractdir, digest)
        exported = extractdir
        original = os.path.join(project_dir, "files", "dev-files")
        assert list(relative_walk(exported)) == list(relative_walk(original))
def test_compositied_node_fails_usefully(cli, datafiles, element, location):
    """Errors on composited nodes must report real provenance, not synthetic nodes."""
    project = str(datafiles)
    result = cli.run(project=project, args=["show", element])
    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)

    # The message must name the element and the offending location, and
    # must never mention a synthetic node.
    assert "synthetic node" not in result.stderr
    assert "{} [{}]: Dictionary did not contain expected key 'path'".format(element, location) in result.stderr
def test_override(cli, datafiles, mount_devices):
    """The shell_mount_devices list option can be overridden from the CLI."""
    project = os.path.join(datafiles.dirname, datafiles.basename, "option-list-directive")
    cli_args = ["--option", "shell_mount_devices", mount_devices, "build"]
    result = cli.run(project=project, silent=True, args=cli_args)
    result.assert_success()
def test_overlaps_whitelisted(cli, datafiles, error):
    """Whitelisted overlaps build cleanly and emit no overlap warning."""
    project_dir = str(datafiles)
    gen_project(project_dir, error)
    result = cli.run(project=project_dir, silent=True, args=["build", "collect-whitelisted.bst"])
    result.assert_success()
    assert "WARNING [overlaps]" not in result.stderr
def test_overlaps_error(cli, datafiles, use_fatal_warnings):
    """With overlaps fatal, the build fails with an OVERLAPS task error."""
    project_dir = str(datafiles)
    gen_project(project_dir, True, use_fatal_warnings)
    result = cli.run(project=project_dir, silent=True, args=["build", "collect.bst"])
    result.assert_main_error(ErrorDomain.STREAM, None)
    result.assert_task_error(ErrorDomain.PLUGIN, CoreWarnings.OVERLAPS)
def test_partial_context_junctions(cli, datafiles):
    """Variables crossing junction boundaries resolve to the expected value."""
    project = str(datafiles)
    result = cli.run(project=project, args=["show", "--format", "%{vars}", "test.bst"])
    result.assert_success()

    result_vars = _yaml.load_data(result.output)
    assert result_vars.get_str("eltvar") == "/bar/foo/baz"
def test_invalid_value_cli(cli, datafiles, cli_option):
    """An invalid boolean option value given on the CLI is a load error."""
    project = os.path.join(datafiles.dirname, datafiles.basename, "option-bool")
    cli_args = ["--option", "pony", cli_option, "show", "--deps", "none", "--format", "%{vars}", "element.bst"]
    result = cli.run(project=project, silent=True, args=cli_args)
    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
def test_conditional_config(cli, datafiles, target, option, expected):
    """A boolean option set in user config drives conditional variables."""
    project = os.path.join(datafiles.dirname, datafiles.basename, "option-bool")
    cli.configure({"projects": {"test": {"options": {"pony": option}}}})

    result = cli.run(project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", target])
    result.assert_success()

    loaded = _yaml.load_data(result.output)
    assert loaded.get_str("thepony") == expected
def test_variables_resolving_errors_in_public_section(cli, datafiles):
    """Unresolved variables in the public section are reported as load errors."""
    project = str(datafiles)
    result = cli.run(project=project, args=["show", "--format", "%{public}", "public_unresolved.bst"])
    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.UNRESOLVED_VARIABLE)
def test_invalid_condition(cli, datafiles):
    """A malformed conditional in project data is a load error."""
    project = os.path.join(datafiles.dirname, datafiles.basename, "invalid-condition")
    result = cli.run(
        project=project,
        silent=True,
        args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
    )
    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
def test_unsupported_arch(cli, datafiles):
    """An OS the project does not support (per uname) is a load error."""
    # Pretend to be running on an unsupported operating system.
    with override_platform_uname(system="ULTRIX"):
        project = os.path.join(datafiles.dirname, datafiles.basename, "option-os")
        result = cli.run(
            project=project,
            silent=True,
            args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
        )
        result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
def test_overlaps_script(cli, datafiles, use_fatal_warnings):
    """Overlaps staged through a script element build successfully.

    Exercises Element.stage_dependency_artifacts() with Scope.RUN.
    """
    project_dir = str(datafiles)
    gen_project(project_dir, False, use_fatal_warnings)
    result = cli.run(project=project_dir, silent=True, args=["build", "script.bst"])
    result.assert_success()