Example 1
def test_keep_dependencies(cli, datafiles):
    project = str(datafiles)
    element_path = "elements"

    # Skip this test if we do not have support for subsecond precision mtimes
    #
    # The artifact expiry logic relies on mtime changes, in real life second precision
    # should be enough for this to work almost all the time, but test cases happen very
    # quickly, resulting in all artifacts having the same mtime.
    #
    # This test requires subsecond mtime to be reliable.
    #
    if not have_subsecond_mtime(project):
        pytest.skip("Filesystem does not support subsecond mtime precision: {}".format(project))

    cli.configure({"cache": {"quota": 10000000}})

    # Create a pretty big dependency
    create_element_size("dependency.bst", project, element_path, [], 5000000)
    res = cli.run(project=project, args=["build", "dependency.bst"])
    res.assert_success()

    # Now create some other unrelated artifact
    create_element_size("unrelated.bst", project, element_path, [], 4000000)
    res = cli.run(project=project, args=["build", "unrelated.bst"])
    res.assert_success()

    # Check that both elements are cached
    states = cli.get_element_states(project, ["dependency.bst", "unrelated.bst"])
    assert states["dependency.bst"] == "cached"
    assert states["unrelated.bst"] == "cached"

    # Try to build an element which depends on the LRU artifact;
    # this build would fail if we didn't make sure that the
    # dependencies of a running build are never removed.
    #
    # Since some artifact caches may implement weak cache keys by
    # duplicating artifacts (bad!) we need to make this equal in size
    # or smaller than half the size of its dependencies.
    #
    create_element_size("target.bst", project, element_path, ["dependency.bst"], 2000000)
    res = cli.run(project=project, args=["build", "target.bst"])
    res.assert_success()

    states = cli.get_element_states(project, ["target.bst", "unrelated.bst"])
    assert states["target.bst"] == "cached"
    assert states["dependency.bst"] == "cached"
    assert states["unrelated.bst"] != "cached"
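
These expiry tests all gate on have_subsecond_mtime. A minimal sketch of how such a probe can work (an illustration of the technique, not necessarily BuildStream's exact helper): create a file, force a nanosecond-scale mtime onto it, and check whether the filesystem kept the sub-second component.

import os
import tempfile

def have_subsecond_mtime(directory):
    # Probe whether the filesystem backing `directory` preserves
    # sub-second mtimes. Sketch only; the real helper may differ.
    try:
        fd, path = tempfile.mkstemp(dir=directory)
        os.close(fd)
    except OSError:
        return False
    try:
        # 12345ns is far below one second; a filesystem with whole
        # second mtime granularity will truncate it to 0.
        os.utime(path, ns=(12345, 12345))
        return os.stat(path).st_mtime_ns == 12345
    except OSError:
        return False
    finally:
        os.unlink(path)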
Example 2
def test_no_needless_overwrite(cli, tmpdir, datafiles):
    project = os.path.join(datafiles.dirname, datafiles.basename)
    dev_files_path = os.path.join(project, "files", "dev-files")
    element_path = os.path.join(project, "elements")
    target = "track-test-target.bst"

    # Skip this test if we do not have support for subsecond precision mtimes
    #
    if not have_subsecond_mtime(project):
        pytest.skip(
            "Filesystem does not support subsecond mtime precision: {}".format(
                project))

    # Create our git repo object with the dev files
    #
    repo = create_repo("git", str(tmpdir))
    repo.create(dev_files_path)

    # Write out our test target and assert it exists
    path_to_target = os.path.join(element_path, target)
    generate_element(repo, path_to_target)
    assert os.path.exists(path_to_target)
    creation_mtime = os.path.getmtime(path_to_target)

    # Assert tracking is needed
    states = cli.get_element_states(project, [target])
    assert states[target] == "no reference"

    # Perform the track
    result = cli.run(project=project, args=["source", "track", target])
    result.assert_success()

    track1_mtime = os.path.getmtime(path_to_target)

    assert creation_mtime != track1_mtime

    # Now (needlessly) track again
    result = cli.run(project=project, args=["source", "track", target])
    result.assert_success()

    track2_mtime = os.path.getmtime(path_to_target)

    assert track1_mtime == track2_mtime
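
The two mtime comparisons only hold if `bst source track` rewrites the element file when the ref changes and leaves it untouched otherwise. A minimal sketch of that write-only-if-changed pattern (a hypothetical helper, not BuildStream's code):

def write_if_changed(path, new_contents):
    # Rewrite `path` only when the contents actually differ, so a
    # no-op track does not bump the file's mtime. Hypothetical.
    try:
        with open(path, "r", encoding="utf-8") as f:
            if f.read() == new_contents:
                return False
    except FileNotFoundError:
        pass
    with open(path, "w", encoding="utf-8") as f:
        f.write(new_contents)
    return True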
Example 3
def test_move_to_empty_dir_set_mtime(src, tmp_path):

    # Skip this test if we do not have support for subsecond precision mtimes
    #
    if not have_subsecond_mtime(str(tmp_path)):
        pytest.skip(
            "Filesystem does not support subsecond mtime precision: {}".format(
                str(tmp_path)))

    dst = tmp_path.joinpath("dst")
    move_atomic(src, dst)
    assert dst.joinpath("test").exists()
    _dst = str(dst)
    # set the mtime via stamp
    timestamp1 = "2020-01-08T11:05:50.832123Z"
    _set_file_mtime(_dst, _parse_timestamp(timestamp1))
    assert timestamp1 == _get_file_mtimestamp(_dst)
    # reset the mtime using an offset stamp
    timestamp2 = "2010-02-12T12:05:50.832123+01:00"
    _set_file_mtime(_dst, _parse_timestamp(timestamp2))
    assert _get_file_mtimestamp(_dst) == "2010-02-12T11:05:50.832123Z"
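
_parse_timestamp, _set_file_mtime and _get_file_mtimestamp are BuildStream internals; a rough standard-library equivalent could look like this, assuming RFC 3339 timestamp strings and mtimes reported in UTC:

import os
from datetime import datetime, timezone

def set_file_mtime(path, timestamp):
    # Parse an RFC 3339 timestamp ("Z" suffix or numeric offset)
    # and apply it as the file's atime and mtime, in nanoseconds.
    dt = datetime.fromisoformat(timestamp.replace("Z", "+00:00"))
    ns = int(dt.timestamp()) * 10**9 + dt.microsecond * 1000
    os.utime(path, ns=(ns, ns))

def get_file_mtimestamp(path):
    # Report the mtime as a UTC RFC 3339 string with microsecond
    # precision, e.g. "2010-02-12T11:05:50.832123Z".
    st = os.stat(path)
    dt = datetime.fromtimestamp(st.st_mtime_ns // 10**9, tz=timezone.utc)
    micros = (st.st_mtime_ns % 10**9) // 1000
    return dt.strftime("%Y-%m-%dT%H:%M:%S") + ".{:06d}Z".format(micros)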
Example 4
def test_artifact_too_large(cli, datafiles, size):
    project = str(datafiles)
    element_path = "elements"

    # Skip this test if we do not have support for subsecond precision mtimes
    #
    # The artifact expiry logic relies on mtime changes, in real life second precision
    # should be enough for this to work almost all the time, but test cases happen very
    # quickly, resulting in all artifacts having the same mtime.
    #
    # This test requires subsecond mtime to be reliable.
    #
    if not have_subsecond_mtime(project):
        pytest.skip("Filesystem does not support subsecond mtime precision: {}".format(project))

    cli.configure({"cache": {"quota": 400000}})

    # Create an element whose artifact is too large
    create_element_size("target.bst", project, element_path, [], size)
    res = cli.run(project=project, args=["build", "target.bst"])
    res.assert_main_error(ErrorDomain.STREAM, None)
    res.assert_task_error(ErrorDomain.CAS, "cache-too-full")
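
The cache-too-full task error marks the unrecoverable case of expiry: even evicting everything evictable cannot make the new artifact fit under the quota. The decision reduces to something like this sketch (hypothetical names, not the CAS code):

def would_fit(artifact_size, quota, used_size, evictable_size):
    # After evicting every artifact not required by the current
    # session, `used_size - evictable_size` bytes remain; the new
    # artifact fits only if it squeezes under the quota alongside
    # them. Otherwise the build must fail with "cache-too-full".
    return artifact_size + (used_size - evictable_size) <= quota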
Example 5
def test_never_delete_required(cli, datafiles):
    project = str(datafiles)
    element_path = "elements"

    # Skip this test if we do not have support for subsecond precision mtimes
    #
    # The artifact expiry logic relies on mtime changes, in real life second precision
    # should be enough for this to work almost all the time, but test cases happen very
    # quickly, resulting in all artifacts having the same mtime.
    #
    # This test requires subsecond mtime to be reliable.
    #
    if not have_subsecond_mtime(project):
        pytest.skip("Filesystem does not support subsecond mtime precision: {}".format(project))

    cli.configure({"cache": {"quota": 10000000}, "scheduler": {"fetchers": 1, "builders": 1}})

    # Create a linear build tree
    create_element_size("dep1.bst", project, element_path, [], 8000000)
    create_element_size("dep2.bst", project, element_path, ["dep1.bst"], 8000000)
    create_element_size("dep3.bst", project, element_path, ["dep2.bst"], 8000000)
    create_element_size("target.bst", project, element_path, ["dep3.bst"], 8000000)

    # Build dep1.bst, which should fit into the cache.
    res = cli.run(project=project, args=["build", "dep1.bst"])
    res.assert_success()

    # We try to build this pipeline, but it's too big for the
    # cache. Since all elements are required, the build should fail.
    res = cli.run(project=project, args=["build", "target.bst"])
    res.assert_main_error(ErrorDomain.STREAM, None)
    res.assert_task_error(ErrorDomain.CAS, "cache-too-full")

    states = cli.get_element_states(project, ["target.bst"])
    assert states["dep1.bst"] == "cached"
    assert states["dep2.bst"] != "cached"
    assert states["dep3.bst"] != "cached"
    assert states["target.bst"] != "cached"
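
The invariant this test pins down is that eviction may only consider artifacts that no element of the current session still needs. A minimal sketch of such an LRU pass, under a hypothetical data model where `artifacts` maps a ref to an (mtime, size) tuple:

def evict_lru(artifacts, required, needed_bytes):
    # Walk artifacts oldest-mtime-first, skipping anything in the
    # `required` set -- deleting those would break the very build
    # that is asking for space. Hypothetical sketch.
    freed = 0
    for ref, (mtime, size) in sorted(artifacts.items(), key=lambda kv: kv[1][0]):
        if ref in required:
            continue
        del artifacts[ref]
        freed += size
        if freed >= needed_bytes:
            return freed
    raise RuntimeError("cache-too-full: could not free {} bytes".format(needed_bytes))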
Example 6
def test_artifact_expires(cli, datafiles):
    project = str(datafiles)
    element_path = "elements"

    # Skip this test if we do not have support for subsecond precision mtimes
    #
    # The artifact expiry logic relies on mtime changes, in real life second precision
    # should be enough for this to work almost all the time, but test cases happen very
    # quickly, resulting in all artifacts having the same mtime.
    #
    # This test requires subsecond mtime to be reliable.
    #
    if not have_subsecond_mtime(project):
        pytest.skip("Filesystem does not support subsecond mtime precision: {}".format(project))

    cli.configure({"cache": {"quota": 10000000}})

    # Create an element that uses almost the entire cache (an empty
    # ostree cache starts at roughly 10KiB, so we need a bit of a
    # buffer)
    create_element_size("target.bst", project, element_path, [], 6000000)
    res = cli.run(project=project, args=["build", "target.bst"])
    res.assert_success()

    assert cli.get_element_state(project, "target.bst") == "cached"

    # Our cache should now be almost full. Let's create another
    # artifact and see if we can cause buildstream to delete the old
    # one.
    create_element_size("target2.bst", project, element_path, [], 6000000)
    res = cli.run(project=project, args=["build", "target2.bst"])
    res.assert_success()

    # Check that the correct element remains in the cache
    states = cli.get_element_states(project, ["target.bst", "target2.bst"])
    assert states["target.bst"] != "cached"
    assert states["target2.bst"] == "cached"
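
For mtime-based LRU to evict the right artifact here, every event that counts as "use" must refresh the artifact's mtime; that is the only recency bookkeeping there is. A sketch of that touch-on-use step (real caches may track recency differently):

import os

def record_artifact_use(path):
    # Bump the artifact's atime/mtime to the current time so the
    # next expiry pass sees it as recently used.
    os.utime(path, None)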
Example 7
def _import_test(tmpdir,
                 original,
                 overlay,
                 generator_function,
                 verify_contents=False):

    # Skip this test if we do not have support for subsecond precision mtimes
    #
    if not have_subsecond_mtime(str(tmpdir)):
        pytest.skip(
            "Filesystem does not support subsecond mtime precision: {}".format(
                str(tmpdir)))

    cas_cache = CASCache(tmpdir, log_directory=os.path.join(tmpdir, "logs"))
    try:
        # Create some fake content
        generator_function(original, tmpdir)
        if original != overlay:
            generator_function(overlay, tmpdir)

        d = create_new_casdir(original, cas_cache, tmpdir)

        duplicate_cas = create_new_casdir(original, cas_cache, tmpdir)

        assert duplicate_cas._get_digest().hash == d._get_digest().hash

        d2 = create_new_casdir(overlay, cas_cache, tmpdir)
        d.import_files(d2, properties=["mtime"])
        export_dir = os.path.join(tmpdir,
                                  "output-{}-{}".format(original, overlay))
        roundtrip_dir = os.path.join(
            tmpdir, "roundtrip-{}-{}".format(original, overlay))
        d2.export_files(roundtrip_dir)
        d.export_files(export_dir)

        if verify_contents:
            for item in root_filesets[overlay - 1]:
                (path, typename, content) = item
                realpath = resolve_symlinks(path, export_dir)
                if typename == "F":
                    if os.path.isdir(realpath) and directory_not_empty(
                            realpath):
                        # The file should not have overwritten the directory in this case.
                        pass
                    else:
                        assert os.path.isfile(
                            realpath
                        ), "{} did not exist in the combined virtual directory".format(
                            path)
                        assert file_contents_are(realpath, content)
                        roundtrip = os.path.join(roundtrip_dir, path)
                        assert os.path.getmtime(roundtrip) == MTIME
                        assert os.path.getmtime(realpath) == MTIME

                elif typename == "S":
                    if os.path.isdir(realpath) and directory_not_empty(
                            realpath):
                        # The symlink should not have overwritten the directory in this case.
                        pass
                    else:
                        assert os.path.islink(realpath)
                        assert os.readlink(realpath) == content
                elif typename == "D":
                    # We can't do any more tests than this because it
                    # depends on things present in the original. Blank
                    # directories here will be ignored and the original
                    # left in place.
                    assert os.path.lexists(realpath)

        # Now do the same thing with file-based directories and check the contents match

        duplicate_cas.import_files(roundtrip_dir, properties=["mtime"])

        assert duplicate_cas._get_digest().hash == d._get_digest().hash
    finally:
        cas_cache.release_resources()
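
Content-addressed storage keys blobs purely by their digest, so timestamps cannot live in the blob itself; that is why import_files() is asked to carry "mtime" as a property and why an export has to re-apply it. A sketch of the export side, assuming the stored property is nanoseconds since the epoch (hypothetical helper, not BuildStream's exporter):

import os
import shutil

def export_file_with_mtime(blob_path, dest_path, mtime_ns):
    # Copy the content-addressed blob out to its destination, then
    # restore the recorded mtime from the stored property.
    shutil.copyfile(blob_path, dest_path)
    os.utime(dest_path, ns=(mtime_ns, mtime_ns))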
Example 8
def test_recently_pulled_artifact_does_not_expire(cli, datafiles, tmpdir):
    project = str(datafiles)
    element_path = "elements"

    # The artifact expiry logic relies on mtime changes, in real life second precision
    # should be enough for this to work almost all the time, but test cases happen very
    # quickly, resulting in all artifacts having the same mtime.
    #
    # This test requires subsecond mtime to be reliable.
    #
    if not have_subsecond_mtime(project):
        pytest.skip(
            "Filesystem does not support subsecond mtime precision: {}".format(
                project))

    # Create an artifact share (remote cache) in tmpdir/artifactshare
    # Set a 22 MB quota
    with create_artifact_share(os.path.join(str(tmpdir), "artifactshare"),
                               quota=int(22e6)) as share:

        # Configure bst to push to the cache
        cli.configure(
            {"artifacts": {
                "servers": [{
                    "url": share.repo,
                    "push": True
                }],
            }})

        # Create and build 2 elements, one 5 MB and one 15 MB.
        create_element_size("element1.bst", project, element_path, [],
                            int(5e6))
        result = cli.run(project=project, args=["build", "element1.bst"])
        result.assert_success()

        create_element_size("element2.bst", project, element_path, [],
                            int(15e6))
        result = cli.run(project=project, args=["build", "element2.bst"])
        result.assert_success()

        # Ensure they are cached locally
        states = cli.get_element_states(project,
                                        ["element1.bst", "element2.bst"])
        assert states == {
            "element1.bst": "cached",
            "element2.bst": "cached",
        }

        # Ensure that they have been pushed to the cache
        assert_shared(cli, share, project, "element1.bst")
        assert_shared(cli, share, project, "element2.bst")

        # Pull the element1 from the remote cache (this should update its mtime).
        # Use a separate local cache for this to ensure the complete element is pulled.
        cli2_path = os.path.join(str(tmpdir), "cli2")
        cli2 = Cli(cli2_path)
        result = cli2.run(project=project,
                          args=[
                              "artifact", "pull", "element1.bst",
                              "--artifact-remote", share.repo
                          ])
        result.assert_success()

        # Ensure element1 is cached locally
        assert cli2.get_element_state(project, "element1.bst") == "cached"

        wait_for_cache_granularity()

        # Create and build the element3 (of 5 MB)
        create_element_size("element3.bst", project, element_path, [],
                            int(5e6))
        result = cli.run(project=project, args=["build", "element3.bst"])
        result.assert_success()

        # Make sure it's cached locally and remotely
        assert cli.get_element_state(project, "element3.bst") == "cached"
        assert_shared(cli, share, project, "element3.bst")

        # Ensure that element2 was deleted from the share and element1 remains
        assert_not_shared(cli, share, project, "element2.bst")
        assert_shared(cli, share, project, "element1.bst")
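
wait_for_cache_granularity() exists to guarantee that element3's artifact gets a strictly newer mtime than element1's freshly refreshed one. A plausible shape for such a helper, assuming the worst case the tests must tolerate is whole-second granularity (a guess at the shape, not the helper's actual body):

import time

def wait_for_cache_granularity():
    # Sleep slightly over one second so that even a filesystem with
    # whole-second mtime granularity hands out a strictly newer
    # timestamp for whatever is written next.
    time.sleep(1.1)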