def test_keep_dependencies(cli, datafiles):
    project = str(datafiles)
    element_path = "elements"

    # Skip this test if we do not have support for subsecond precision mtimes
    #
    # The artifact expiry logic relies on mtime changes; in real life, second
    # precision should be enough for this to work almost all the time, but test
    # cases happen very quickly, resulting in all artifacts having the same mtime.
    #
    # This test requires subsecond mtime to be reliable.
    #
    if not have_subsecond_mtime(project):
        pytest.skip("Filesystem does not support subsecond mtime precision: {}".format(project))

    cli.configure({"cache": {"quota": 10000000}})

    # Create a pretty big dependency
    create_element_size("dependency.bst", project, element_path, [], 5000000)
    res = cli.run(project=project, args=["build", "dependency.bst"])
    res.assert_success()

    # Now create some other unrelated artifact
    create_element_size("unrelated.bst", project, element_path, [], 4000000)
    res = cli.run(project=project, args=["build", "unrelated.bst"])
    res.assert_success()

    # Check that both elements remain in the cache
    states = cli.get_element_states(project, ["dependency.bst", "unrelated.bst"])
    assert states["dependency.bst"] == "cached"
    assert states["unrelated.bst"] == "cached"

    # We try to build an element which depends on the LRU artifact,
    # and could therefore fail if we didn't make sure dependencies
    # aren't removed.
    #
    # Since some artifact caches may implement weak cache keys by
    # duplicating artifacts (bad!) we need to make this equal in size
    # or smaller than half the size of its dependencies.
    #
    create_element_size("target.bst", project, element_path, ["dependency.bst"], 2000000)
    res = cli.run(project=project, args=["build", "target.bst"])
    res.assert_success()

    states = cli.get_element_states(project, ["target.bst", "unrelated.bst"])
    assert states["target.bst"] == "cached"
    assert states["dependency.bst"] == "cached"
    assert states["unrelated.bst"] != "cached"

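# Every test in this module guards on have_subsecond_mtime() before relying on
# mtime ordering. As a rough, hypothetical sketch (not the project's actual
# helper; the name and approach below are assumptions for illustration only),
# such a probe could stamp a scratch file with a known subsecond mtime and
# check whether the filesystem preserves the fractional part:
def _illustrative_probe_subsecond_mtime(directory):
    import os
    import tempfile

    fd, path = tempfile.mkstemp(dir=directory)
    os.close(fd)
    try:
        # Stamp the file with a timestamp carrying a non-zero fractional second
        os.utime(path, ns=(1234567891123456789, 1234567891123456789))
        # Filesystems limited to whole-second mtimes truncate the fraction to zero
        return os.stat(path).st_mtime_ns % 1000000000 != 0
    finally:
        os.remove(path)
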
def test_never_delete_required(cli, datafiles):
    project = str(datafiles)
    element_path = "elements"

    # Skip this test if we do not have support for subsecond precision mtimes
    #
    # The artifact expiry logic relies on mtime changes; in real life, second
    # precision should be enough for this to work almost all the time, but test
    # cases happen very quickly, resulting in all artifacts having the same mtime.
    #
    # This test requires subsecond mtime to be reliable.
    #
    if not have_subsecond_mtime(project):
        pytest.skip("Filesystem does not support subsecond mtime precision: {}".format(project))

    cli.configure({"cache": {"quota": 10000000}, "scheduler": {"fetchers": 1, "builders": 1}})

    # Create a linear build tree
    create_element_size("dep1.bst", project, element_path, [], 8000000)
    create_element_size("dep2.bst", project, element_path, ["dep1.bst"], 8000000)
    create_element_size("dep3.bst", project, element_path, ["dep2.bst"], 8000000)
    create_element_size("target.bst", project, element_path, ["dep3.bst"], 8000000)

    # Build dep1.bst, which should fit into the cache.
    res = cli.run(project=project, args=["build", "dep1.bst"])
    res.assert_success()

    # We try to build this pipeline, but it's too big for the
    # cache. Since all elements are required, the build should fail.
    res = cli.run(project=project, args=["build", "target.bst"])
    res.assert_main_error(ErrorDomain.STREAM, None)
    res.assert_task_error(ErrorDomain.CAS, "cache-too-full")

    states = cli.get_element_states(project, ["target.bst"])
    assert states["dep1.bst"] == "cached"
    assert states["dep2.bst"] != "cached"
    assert states["dep3.bst"] != "cached"
    assert states["target.bst"] != "cached"

def test_no_needless_overwrite(cli, tmpdir, datafiles):
    project = os.path.join(datafiles.dirname, datafiles.basename)
    dev_files_path = os.path.join(project, "files", "dev-files")
    element_path = os.path.join(project, "elements")
    target = "track-test-target.bst"

    # Skip this test if we do not have support for subsecond precision mtimes
    #
    if not have_subsecond_mtime(project):
        pytest.skip("Filesystem does not support subsecond mtime precision: {}".format(project))

    # Create our repo object of the given source type with
    # the dev files, and then collect the initial ref.
    #
    repo = Git(str(tmpdir))
    repo.create(dev_files_path)

    # Write out our test target and assert it exists
    generate_element(repo, os.path.join(element_path, target))
    path_to_target = os.path.join(element_path, target)
    assert os.path.exists(path_to_target)
    creation_mtime = os.path.getmtime(path_to_target)

    # Assert tracking is needed
    states = cli.get_element_states(project, [target])
    assert states[target] == "no reference"

    # Perform the track
    result = cli.run(project=project, args=["source", "track", target])
    result.assert_success()

    track1_mtime = os.path.getmtime(path_to_target)
    assert creation_mtime != track1_mtime

    # Now (needlessly) track again
    result = cli.run(project=project, args=["source", "track", target])
    result.assert_success()

    track2_mtime = os.path.getmtime(path_to_target)
    assert track1_mtime == track2_mtime

def test_artifact_expires(cli, datafiles):
    project = str(datafiles)
    element_path = "elements"

    # Skip this test if we do not have support for subsecond precision mtimes
    #
    # The artifact expiry logic relies on mtime changes; in real life, second
    # precision should be enough for this to work almost all the time, but test
    # cases happen very quickly, resulting in all artifacts having the same mtime.
    #
    # This test requires subsecond mtime to be reliable.
    #
    if not have_subsecond_mtime(project):
        pytest.skip("Filesystem does not support subsecond mtime precision: {}".format(project))

    cli.configure({"cache": {"quota": 10000000}})

    # Create an element that uses almost the entire cache (an empty
    # ostree cache starts at around 10KiB, so we need a bit of a
    # buffer)
    create_element_size("target.bst", project, element_path, [], 6000000)
    res = cli.run(project=project, args=["build", "target.bst"])
    res.assert_success()
    assert cli.get_element_state(project, "target.bst") == "cached"

    # Our cache should now be almost full. Let's create another
    # artifact and see if we can cause buildstream to delete the old
    # one.
    create_element_size("target2.bst", project, element_path, [], 6000000)
    res = cli.run(project=project, args=["build", "target2.bst"])
    res.assert_success()

    # Check that the correct element remains in the cache
    states = cli.get_element_states(project, ["target.bst", "target2.bst"])
    assert states["target.bst"] != "cached"
    assert states["target2.bst"] == "cached"

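# For reference, the cli.configure({"cache": {"quota": ...}}) calls in these
# tests mirror the "cache" section of a user's buildstream.conf. A minimal
# sketch of the equivalent configuration (BuildStream's user configuration
# also accepts human-readable quota values such as "10M" or a percentage):
#
#     cache:
#       quota: 10000000
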
def test_move_to_empty_dir_set_mtime(src, tmp_path):
    # Skip this test if we do not have support for subsecond precision mtimes
    #
    if not have_subsecond_mtime(str(tmp_path)):
        pytest.skip("Filesystem does not support subsecond mtime precision: {}".format(str(tmp_path)))

    dst = tmp_path.joinpath("dst")
    move_atomic(src, dst)
    assert dst.joinpath("test").exists()
    _dst = str(dst)

    # set the mtime via stamp
    timestamp1 = "2020-01-08T11:05:50.832123Z"
    _set_file_mtime(_dst, _parse_timestamp(timestamp1))
    assert timestamp1 == _get_file_mtimestamp(_dst)

    # reset the mtime using an offset stamp
    timestamp2 = "2010-02-12T12:05:50.832123+01:00"
    _set_file_mtime(_dst, _parse_timestamp(timestamp2))
    assert _get_file_mtimestamp(_dst) == "2010-02-12T11:05:50.832123Z"

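# The offset round-trip asserted above can be sanity-checked with the standard
# library alone; this is just a worked illustration, independent of the
# _parse_timestamp/_get_file_mtimestamp helpers under test:
def _illustrative_offset_roundtrip():
    from datetime import datetime, timezone

    # "+01:00" denotes the same instant as one hour earlier in UTC
    local = datetime.fromisoformat("2010-02-12T12:05:50.832123+01:00")
    assert local.astimezone(timezone.utc).isoformat() == "2010-02-12T11:05:50.832123+00:00"
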
def test_artifact_too_large(cli, datafiles, size):
    project = str(datafiles)
    element_path = "elements"

    # Skip this test if we do not have support for subsecond precision mtimes
    #
    # The artifact expiry logic relies on mtime changes; in real life, second
    # precision should be enough for this to work almost all the time, but test
    # cases happen very quickly, resulting in all artifacts having the same mtime.
    #
    # This test requires subsecond mtime to be reliable.
    #
    if not have_subsecond_mtime(project):
        pytest.skip("Filesystem does not support subsecond mtime precision: {}".format(project))

    cli.configure({"cache": {"quota": 400000}})

    # Create an element whose artifact is too large
    create_element_size("target.bst", project, element_path, [], size)
    res = cli.run(project=project, args=["build", "target.bst"])
    res.assert_main_error(ErrorDomain.STREAM, None)
    res.assert_task_error(ErrorDomain.CAS, "cache-too-full")

def _import_test(tmpdir, original, overlay, generator_function, verify_contents=False):
    # Skip this test if we do not have support for subsecond precision mtimes
    #
    if not have_subsecond_mtime(str(tmpdir)):
        pytest.skip("Filesystem does not support subsecond mtime precision: {}".format(str(tmpdir)))

    cas_cache = CASCache(tmpdir, log_directory=os.path.join(tmpdir, "logs"))
    try:
        # Create some fake content
        generator_function(original, tmpdir)
        if original != overlay:
            generator_function(overlay, tmpdir)

        d = create_new_casdir(original, cas_cache, tmpdir)

        duplicate_cas = create_new_casdir(original, cas_cache, tmpdir)

        assert duplicate_cas._get_digest().hash == d._get_digest().hash

        d2 = create_new_casdir(overlay, cas_cache, tmpdir)
        d._import_files_internal(d2, properties=["mtime"])
        export_dir = os.path.join(tmpdir, "output-{}-{}".format(original, overlay))
        roundtrip_dir = os.path.join(tmpdir, "roundtrip-{}-{}".format(original, overlay))
        d2._export_files(roundtrip_dir)
        d._export_files(export_dir)

        if verify_contents:
            for item in root_filesets[overlay - 1]:
                (path, typename, content) = item
                realpath = resolve_symlinks(path, export_dir)
                if typename == "F":
                    if os.path.isdir(realpath) and directory_not_empty(realpath):
                        # The file should not have overwritten the directory in this case.
                        pass
                    else:
                        assert os.path.isfile(realpath), "{} did not exist in the combined virtual directory".format(path)
                        assert file_contents_are(realpath, content)
                        roundtrip = os.path.join(roundtrip_dir, path)
                        assert os.path.getmtime(roundtrip) == MTIME
                        assert os.path.getmtime(realpath) == MTIME
                elif typename == "S":
                    if os.path.isdir(realpath) and directory_not_empty(realpath):
                        # The symlink should not have overwritten the directory in this case.
                        pass
                    else:
                        assert os.path.islink(realpath)
                        assert os.readlink(realpath) == content
                elif typename == "D":
                    # We can't do any more tests than this because it
                    # depends on things present in the original. Blank
                    # directories here will be ignored and the original
                    # left in place.
                    assert os.path.lexists(realpath)

        # Now do the same thing with filebaseddirectories and check the contents match
        duplicate_cas._import_files_internal(roundtrip_dir, properties=["mtime"])

        assert duplicate_cas._get_digest().hash == d._get_digest().hash
    finally:
        cas_cache.release_resources()