Example #1
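Most of these excerpts omit their module-level imports; as an assumption, the aliases they use (dc, co, op) would resolve with a preamble along these lines:

import collections as co    # used as co.deque below
import hashlib
import os
import os.path as op
import shutil
import tempfile
import threading

import diskcache as dc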
 def __init__(self, cache_path: str = ""):
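     # cache_size and cache_version come from the surrounding module
     # (not shown); size_limit is given in bytes.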
     self._data = dc.FanoutCache(cache_path, shards=8, size_limit=int(float(cache_size.get())))
     self._hit = 0
     self._miss = 0
     if self.version < cache_version:
         self._data.clear()
         self.version = cache_version
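Only the constructor is shown; a minimal sketch of how the _hit/_miss counters might be consumed, assuming a companion get method that is not part of the original:

 def get(self, key, default=None):
     # Hypothetical accessor: tally hits and misses around the lookup.
     sentinel = object()
     value = self._data.get(key, default=sentinel)
     if value is sentinel:
         self._miss += 1
         return default
     self._hit += 1
     return value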
Example #2
def test_incr_concurrent():
    count = 16
    limit = 500

    with dc.FanoutCache('tmp', timeout=0.001) as cache:
        threads = [
            threading.Thread(target=stress_incr, args=(cache, limit))
            for _ in range(count)
        ]

        for thread in threads:
            thread.start()

        for thread in threads:
            thread.join()

    with dc.FanoutCache('tmp') as cache:
        assert cache.get(b'key') == count * limit
        cache.check()

    shutil.rmtree('tmp', ignore_errors=True)
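stress_incr is not defined in the excerpt; a sketch of what the assertion cache.get(b'key') == count * limit implies, using diskcache's incr with retry=True so the short 0.001s timeout does not raise:

def stress_incr(cache, limit):
    # Each of the 16 worker threads increments the same counter
    # `limit` times; retry=True keeps retrying past lock timeouts.
    for _ in range(limit):
        cache.incr(b'key', retry=True)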
Example #3
def test_copy():
    cache_dir1 = op.join('tmp', 'foo')

    with dc.FanoutCache(cache_dir1) as cache1:
        for count in range(10):
            cache1[count] = str(count)

        for count in range(10, 20):
            cache1[count] = str(count) * int(1e5)

    cache_dir2 = op.join('tmp', 'bar')
    shutil.copytree(cache_dir1, cache_dir2)

    with dc.FanoutCache(cache_dir2) as cache2:
        for count in range(10):
            assert cache2[count] == str(count)

        for count in range(10, 20):
            assert cache2[count] == str(count) * int(1e5)

    shutil.rmtree('tmp', ignore_errors=True)
Example #4
def test_copy():
    cache_dir1 = tempfile.mkdtemp()

    with dc.FanoutCache(cache_dir1) as cache1:
        for count in range(10):
            cache1[count] = str(count)

        for count in range(10, 20):
            cache1[count] = str(count) * int(1e5)

    cache_dir2 = tempfile.mkdtemp()
    shutil.rmtree(cache_dir2)
    shutil.copytree(cache_dir1, cache_dir2)

    with dc.FanoutCache(cache_dir2) as cache2:
        for count in range(10):
            assert cache2[count] == str(count)

        for count in range(10, 20):
            assert cache2[count] == str(count) * int(1e5)

    shutil.rmtree(cache_dir1, ignore_errors=True)
    shutil.rmtree(cache_dir2, ignore_errors=True)
Example #5
def test_custom_filename_disk():
    with dc.FanoutCache(disk=SHA256FilenameDisk) as cache:
        for count in range(100, 200):
            key = str(count).encode('ascii')
            cache[key] = str(count) * int(1e5)

    disk = SHA256FilenameDisk(cache.directory)

    for count in range(100, 200):
        key = str(count).encode('ascii')
        subdir = '%03d' % (disk.hash(key) % 8)
        filename = hashlib.sha256(key).hexdigest()[:32]
        full_path = op.join(cache.directory, subdir, filename)

        with open(full_path) as reader:
            content = reader.read()
            assert content == str(count) * int(1e5)

    shutil.rmtree(cache.directory, ignore_errors=True)
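SHA256FilenameDisk is referenced but never defined in this excerpt. A sketch consistent with the paths checked above, assuming diskcache's Disk.filename contract of returning a (relative_name, full_path) pair; the exact signature varies between versions:

class SHA256FilenameDisk(dc.Disk):
    def filename(self, key=None, value=None):
        # Name value files after the SHA-256 of the key instead of
        # the default random hex name.
        name = hashlib.sha256(key).hexdigest()[:32]
        return name, op.join(self._directory, name)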
Example #6
def test_add_concurrent():
    with dc.FanoutCache(shards=1) as cache:
        results = co.deque()
        limit = 1000

        threads = [
            threading.Thread(target=stress_add, args=(cache, limit, results))
            for _ in range(16)
        ]

        for thread in threads:
            thread.start()

        for thread in threads:
            thread.join()

        assert sum(results) == limit
        cache.check()
    shutil.rmtree(cache.directory, ignore_errors=True)
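stress_add is likewise assumed; the assertion sum(results) == limit holds because Cache.add stores a key only if it is absent, so across all 16 racing threads each key is added exactly once. A sketch:

def stress_add(cache, limit, results):
    # Count how many adds this thread wins; add() returns True only
    # for the first thread to store a given key.
    success = 0
    for num in range(limit):
        if cache.add(num, num, retry=True):
            success += 1
    results.append(success)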
Example #7
def read(clear=False):
    "Read item from reading list."
    import biplist  # pylint: disable=import-error
    import diskcache  # pylint: disable=import-error
    import os.path
    import webbrowser

    home_dir = os.path.expanduser('~')
    gj_dir = os.path.join(home_dir, '.gj')
    cache_dir = os.path.join(gj_dir, 'diskcache')
    cache = diskcache.FanoutCache(cache_dir)
    reading_list_index = cache.index('reading-list')

    if clear:
        reading_list_index.clear()
        return

    data = biplist.readPlist('/Users/grantj/Library/Safari/Bookmarks.plist')

    reading_list_all = lookup(
        data,
        'Children',
        ('Title', 'com.apple.ReadingList'),
        'Children'
    )

    reading_list = [
        item for item in reading_list_all
        if 'DateLastViewed' not in item['ReadingList']
    ]

    reading_list.sort(key=lambda item: item['ReadingList']['DateAdded'])

    for item in reading_list:
        url = item['URLString']

        if url not in reading_list_index:
            reading_list_index[url] = 1
            webbrowser.open(url)
            break
    else:
        print(u'Nothing to read \N{white frowning face}')
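lookup is an undefined helper; a plausible sketch of the traversal implied by the call above (purely an assumption):

def lookup(data, *path):
    # A string step indexes into a dict; a (key, value) tuple step
    # selects the first list element whose `key` equals `value`.
    for step in path:
        if isinstance(step, tuple):
            key, value = step
            data = next(item for item in data if item.get(key) == value)
        else:
            data = data[step]
    return data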
Example #8
def main():
    import argparse
    import time

    import diskcache as dc

    parser = argparse.ArgumentParser()
    parser.add_argument('--cache-dir', default='/tmp/test')
    parser.add_argument('--iterations', type=int, default=100)
    parser.add_argument('--sleep', type=float, default=0.1)
    parser.add_argument('--size', type=int, default=25)
    args = parser.parse_args()

    data = dc.FanoutCache(args.cache_dir)
    delays = []
    values = {str(num): num for num in range(args.size)}
    iterations = args.iterations

    for i in range(args.iterations):
        print(f'Iteration {i + 1}/{iterations}', end='\r')
        time.sleep(args.sleep)
        for key, value in values.items():
            start = time.monotonic()
            data[key] = value
            stop = time.monotonic()
            diff = stop - start
            delays.append(diff)

    # Discard warmup delays, first two iterations.
    del delays[:(len(values) * 2)]

    # Convert seconds to microseconds.
    delays = sorted(delay * 1e6 for delay in delays)

    # Display performance.
    print()
    print(f'Total #:  {len(delays)}')
    print(f'Min delay (us): {delays[0]:>8.3f}')
    print(f'50th %ile (us): {delays[int(len(delays) * 0.50)]:>8.3f}')
    print(f'90th %ile (us): {delays[int(len(delays) * 0.90)]:>8.3f}')
    print(f'99th %ile (us): {delays[int(len(delays) * 0.99)]:>8.3f}')
    print(f'Max delay (us): {delays[-1]:>8.3f}')
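Run as a script (file name hypothetical), the benchmark would be invoked like:

$ python fanout_latency.py --cache-dir /tmp/test --iterations 100 --sleep 0.1 --size 25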
Example #9
 def wrapper():
     shutil.rmtree('tmp', ignore_errors=True)
     with dc.FanoutCache('tmp') as cache:
         func(cache)
     shutil.rmtree('tmp', ignore_errors=True)
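The one-space indent suggests wrapper is nested inside a test decorator; a sketch of a plausible enclosing function (name assumed):

import functools

def with_cache(func):
    # Hypothetical decorator: run the wrapped test against a fresh
    # FanoutCache in ./tmp and clean up the directory afterwards.
    @functools.wraps(func)
    def wrapper():
        shutil.rmtree('tmp', ignore_errors=True)
        with dc.FanoutCache('tmp') as cache:
            func(cache)
        shutil.rmtree('tmp', ignore_errors=True)
    return wrapper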
Example #10
"""
See https://docs.docker.com/registry/spec/api/#overview
and https://docs.docker.com/registry/spec/manifest-v2-2/#example-manifest-list
"""
import flask
import pathlib
import json
import subprocess
import hashlib
import diskcache

app = flask.Flask(__name__)

dcache = diskcache.FanoutCache('/tmp/simci-cache/',
                               shards=4,
                               timeout=2,
                               eviction_policy='least-recently-used')

EMPTY_MANIFESTS = {
    "schemaVersion": 2,
    "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
}


def _git_checkout(sha):
    # TODO(tom): ensure clone if repo not yet cloned.
    # TODO(tom): cache
    tar = subprocess.check_output([
        'git', '--git-dir', '/home/tom/src/nixpkgs/.git', 'archive',
        '--prefix', 'ar/', sha
    ])
Example #11
 def prepare(self):
     cache = diskcache.FanoutCache(self.args.cache_path,
                                   shards=self.shards,
                                   eviction_policy='none')
     self.cache = diskcache.Index.fromcache(cache)
Example #12
def test_rsync():
    try:
        run(['rsync', '--version'])
    except OSError:
        return  # No rsync installed. Skip test.

    rsync_args = ['rsync', '-a', '--checksum', '--delete', '--stats']
    cache_dir1 = tempfile.mkdtemp() + os.sep
    cache_dir2 = tempfile.mkdtemp() + os.sep

    # Store some items in cache_dir1.

    with dc.FanoutCache(cache_dir1) as cache1:
        for count in range(100):
            cache1[count] = str(count)

        for count in range(100, 200):
            cache1[count] = str(count) * int(1e5)

    # Rsync cache_dir1 to cache_dir2.

    run(rsync_args + [cache_dir1, cache_dir2])

    # Validate items in cache_dir2.

    with dc.FanoutCache(cache_dir2) as cache2:
        for count in range(100):
            assert cache2[count] == str(count)

        for count in range(100, 200):
            assert cache2[count] == str(count) * int(1e5)

    # Store more items in cache_dir2.

    with dc.FanoutCache(cache_dir2) as cache2:
        for count in range(200, 300):
            cache2[count] = str(count)

        for count in range(300, 400):
            cache2[count] = str(count) * int(1e5)

    # Rsync cache_dir2 to cache_dir1.

    run(rsync_args + [cache_dir2, cache_dir1])

    # Validate items in cache_dir1.

    with dc.FanoutCache(cache_dir1) as cache1:
        for count in range(100):
            assert cache1[count] == str(count)

        for count in range(100, 200):
            assert cache1[count] == str(count) * int(1e5)

        for count in range(200, 300):
            assert cache1[count] == str(count)

        for count in range(300, 400):
            assert cache1[count] == str(count) * int(1e5)

    shutil.rmtree(cache_dir1, ignore_errors=True)
    shutil.rmtree(cache_dir2, ignore_errors=True)
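run is not defined in the excerpt; since the test treats OSError as "rsync not installed", it is presumably a thin subprocess wrapper, e.g.:

import subprocess

def run(args):
    # Hypothetical helper: execute a command, raising on failure;
    # a missing binary surfaces as FileNotFoundError (an OSError).
    subprocess.check_call(args, stdout=subprocess.DEVNULL)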
Example #13
@pytest.fixture  # assumption: decorator omitted from the excerpt
def cache():
    with dc.FanoutCache() as cache:
        yield cache
    shutil.rmtree(cache.directory, ignore_errors=True)
Example #14
"""
$ python -m IPython tests/benchmark_kv_store.py

"""

from IPython import get_ipython

import diskcache

ipython = get_ipython()
assert ipython is not None, 'No IPython! Run with $ ipython ...'

value = 'value'

print('diskcache set')
dc = diskcache.FanoutCache('/tmp/diskcache')
ipython.magic("timeit -n 100 -r 7 dc['key'] = value")
print('diskcache get')
ipython.magic("timeit -n 100 -r 7 dc['key']")
print('diskcache set/delete')
ipython.magic("timeit -n 100 -r 7 dc['key'] = value; del dc['key']")

try:
    import dbm.gnu  # Only trust GNU DBM
except ImportError:
    print('Error: Cannot import dbm.gnu')
    print('Error: Skipping import shelve')
else:
    print('dbm set')
    d = dbm.gnu.open('/tmp/dbm', 'c')
    ipython.magic("timeit -n 100 -r 7 d['key'] = value; d.sync()")
Example #15
        malignant_tensor = torch.from_numpy(
            np.array([isMalignant_bool], dtype=np.float32))

        # dim=4, Channel x Index x Row x Col
        ct_tensor = ct_tensor.unsqueeze(0)

        # Unpacked as: input_tensor, answer_int, series_uid, center_irc
        return ct_tensor, malignant_tensor, series_uid, center_irc


# In[8]:

import functools
import diskcache

cache = diskcache.FanoutCache('/tmp/diskcache/fanoutcache')


@functools.lru_cache(1, typed=True)
def getCt(series_uid):
    return Ct(series_uid)


@cache.memoize(typed=True)
def getCtInputChunk(series_uid, center_xyz, width_irc):
    ct = getCt(series_uid)
    ct_chunk, center_irc = ct.getInputChunk(center_xyz, width_irc)
    return ct_chunk, center_irc

