Example #1
def test_with_cache(blobs_digests):
    blobs, digests = blobs_digests
    blob1, blob2, blob3, *_ = blobs
    digest1, digest2, digest3, *_ = digests

    cache = LRUMemoryCache(256)
    fallback = LRUMemoryCache(256)
    with_cache_storage = WithCacheStorage(cache, fallback)

    assert not with_cache_storage.has_blob(digest1)
    write(with_cache_storage, digest1, blob1)
    assert cache.has_blob(digest1)
    assert fallback.has_blob(digest1)
    assert with_cache_storage.get_blob(digest1).read() == blob1

    # Even if a blob is in the cache, we still need to check whether the
    # fallback has it.
    write(cache, digest2, blob2)
    assert not with_cache_storage.has_blob(digest2)
    write(fallback, digest2, blob2)
    assert with_cache_storage.has_blob(digest2)

    # When a blob is in the fallback but not the cache, reading it should
    # put it into the cache.
    write(fallback, digest3, blob3)
    assert with_cache_storage.get_blob(digest3).read() == blob3
    assert cache.has_blob(digest3)
    assert cache.get_blob(digest3).read() == blob3
    assert cache.has_blob(digest3)
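
The write() helper used throughout these tests is not part of the excerpt. Assuming the storage objects follow buildgrid's begin_write()/commit_write() session protocol, a minimal sketch could be:

def write(storage, digest, blob):
    # Hypothetical helper: open a write session, stream the blob into it,
    # and commit it under its digest. Assumes buildgrid's StorageABC API.
    session = storage.begin_write(digest)
    session.write(blob)
    storage.commit_write(digest, session)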
Example #2
def any_index(request):
    if request.param == 'sql':
        storage = LRUMemoryCache(256)
        with tempfile.NamedTemporaryFile() as db:
            yield SQLIndex(storage=storage,
                           connection_string="sqlite:///%s" % db.name,
                           automigrate=True)
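
The excerpt omits the fixture's decorator; presumably any_index is registered as a parametrized pytest fixture, roughly:

import pytest

@pytest.fixture(params=['sql'])  # assumed parametrization; only 'sql' appears above
def any_index(request):
    ...  # body as shown in the example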
Example #3
def any_storage(request):
    if request.param == 'lru':
        yield LRUMemoryCache(256)
    elif request.param == 'disk':
        with tempfile.TemporaryDirectory() as path:
            yield DiskStorage(path)
    elif request.param == 's3':
        with mock_s3():
            auth_args = {
                "aws_access_key_id": "access_key",
                "aws_secret_access_key": "secret_key"
            }
            boto3.resource('s3', **auth_args).create_bucket(Bucket='testing')
            yield S3Storage('testing')
    elif request.param == 'lru_disk':
        # LRU cache with a uselessly small limit, so requests always fall back
        with tempfile.TemporaryDirectory() as path:
            yield WithCacheStorage(LRUMemoryCache(1), DiskStorage(path))
    elif request.param == 'disk_s3':
        # Disk-based cache of S3, but we don't delete files, so requests
        # are always handled by the cache
        with tempfile.TemporaryDirectory() as path:
            with mock_s3():
                auth_args = {
                    "aws_access_key_id": "access_key",
                    "aws_secret_access_key": "secret_key"
                }
                boto3.resource('s3',
                               **auth_args).create_bucket(Bucket='testing')
                yield WithCacheStorage(DiskStorage(path), S3Storage('testing'))
    elif request.param == 'remote':
        with serve_cas(['testing']) as server:
            yield server.remote
    elif request.param == 'redis':
        with patch('buildgrid.server.cas.storage.redis.redis.Redis',
                   fakeredis.FakeRedis):
            from buildgrid.server.cas.storage.redis import RedisStorage

            input_dict = {'host': "localhost", 'port': 8000, 'db': 0}
            yield RedisStorage(**input_dict)
    elif request.param == 'sql_index':
        storage = LRUMemoryCache(256)
        with tempfile.NamedTemporaryFile() as db:
            yield SQLIndex(storage=storage,
                           connection_string="sqlite:///%s" % db.name,
                           automigrate=True)
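
A test that takes any_storage as an argument gets whichever backend the current parameter selects, so a single round-trip test covers every configuration. A hypothetical example, reusing the write() helper sketched under Example #1:

def test_blob_roundtrip(any_storage, blobs_digests):
    # Illustrative only: write one blob, then read it back through the
    # same storage interface, whichever backend is active.
    blobs, digests = blobs_digests
    write(any_storage, digests[0], blobs[0])
    assert any_storage.has_blob(digests[0])
    assert any_storage.get_blob(digests[0]).read() == blobs[0]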
Example #4
def test_action_cache_allow_updates_capabilities(instance):
    storage = LRUMemoryCache(limit=256)
    action_cache = ActionCache(storage,
                               max_cached_refs=256,
                               allow_updates=True)

    with serve_capabilities_service(
        [instance], action_cache_instance=action_cache) as server:
        server_interface = ServerInterface(server.remote)
        response = server_interface.get_capabilities(instance)

        assert response.cache_capabilities.action_cache_update_capabilities.update_enabled
Example #5
def test_execution_available_capabilities(instance):
    storage = LRUMemoryCache(1024 * 1024)
    data_store = MemoryDataStore(storage)
    controller = ExecutionController(data_store, storage=storage)

    with serve_capabilities_service(
        [instance],
            execution_instance=controller.execution_instance) as server:
        server_interface = ServerInterface(server.remote)
        response = server_interface.get_capabilities(instance)

        assert response.execution_capabilities.exec_enabled
        assert response.execution_capabilities.digest_function
Example #6
def test_lru_eviction(blobs_digests):
    blobs, digests = blobs_digests
    blob1, blob2, blob3, *_ = blobs
    digest1, digest2, digest3, *_ = digests

    lru = LRUMemoryCache(8)
    write(lru, digest1, blob1)
    write(lru, digest2, blob2)
    assert lru.has_blob(digest1)
    assert lru.has_blob(digest2)

    write(lru, digest3, blob3)
    # Check that the LRU evicted blob1 (it was written first)
    assert not lru.has_blob(digest1)
    assert lru.has_blob(digest2)
    assert lru.has_blob(digest3)

    assert lru.get_blob(digest2).read() == blob2
    write(lru, digest1, blob1)
    # Check that the LRU evicted blob3 (since we just read blob2)
    assert lru.has_blob(digest1)
    assert lru.has_blob(digest2)
    assert not lru.has_blob(digest3)

    assert lru.has_blob(digest2)
    write(lru, digest3, blob1)
    # Check that the LRU evicted blob1 (since we just checked blob2)
    assert not lru.has_blob(digest1)
    assert lru.has_blob(digest2)
    assert lru.has_blob(digest3)
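
The eviction order exercised above is standard least-recently-used behavior: writes, has_blob() checks, and get_blob() reads all refresh an entry's recency, and the oldest entry is evicted once the cache's byte limit is exceeded. A minimal illustration of the same policy using collections.OrderedDict (counting entries rather than bytes for brevity; this is not buildgrid's implementation):

from collections import OrderedDict

class TinyLRU:
    def __init__(self, limit):
        self._limit = limit
        self._entries = OrderedDict()

    def put(self, key, value):
        self._entries[key] = value
        self._entries.move_to_end(key)          # writes refresh recency
        while len(self._entries) > self._limit:
            self._entries.popitem(last=False)   # evict the least recently used

    def get(self, key):
        self._entries.move_to_end(key)          # reads refresh recency too
        return self._entries[key]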
Example #7
def __new__(cls, size):
    return LRUMemoryCache(_parse_size(size))
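
This __new__ appears to belong to a class used by buildgrid's YAML configuration parser, so instantiating the class hands back an LRUMemoryCache directly. The _parse_size helper is not shown; presumably it turns a human-readable size string into a byte count, along these lines:

# Hypothetical stand-in for _parse_size: '512', '64K', '256M' -> bytes.
_UNITS = {'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3}

def _parse_size(size):
    size = str(size).strip().upper().rstrip('B')
    if size[-1:] in _UNITS:
        return int(size[:-1]) * _UNITS[size[-1]]
    return int(size)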