Example #1
def test_blob_file(metasync, opts):
    "test blobfile-related operations"

    test_init(metasync, opts)

    bs = blobs.BlobStore2(metasync)
    blob_file = blobs.BlobFile2(bs)

    # empty file
    assert blob_file.hv is not None \
        and len(blob_file.entries) == 0

    # random file with 3 chunks (last one is smaller than unit)
    unit = 1 * MB
    size = 3 * MB - 2 * KB
    pn = os.path.join(opts.tmpdir, "file-%s" % size)
    util.create_random_file(pn, size)

    # split the file into unit-sized chunks and load them into a blob
    blob_file = bs.load_file(pn, unit)

    # check entries and total size
    assert len(blob_file.entries) == 3 and blob_file.size == size

    # test store/load
    blob_file.store()

    # load it back from disk and check round-trip equality
    loaded_blob = blobs.BlobFile2(bs, blob_file.hv)
    assert loaded_blob.dump() == blob_file.dump()
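These tests lean on helpers (util.create_random_file, util.mkdirs) and size constants (KB, MB) that the snippets never define. Below is a minimal sketch of plausible stand-ins for running an example in isolation; the real implementations live in metasync's util module and may differ:

import os
import errno

KB = 1024
MB = 1024 * KB

def create_random_file(pn, size):
    # fill path `pn` with `size` bytes of random data
    with open(pn, "wb") as f:
        f.write(os.urandom(size))

def mkdirs(pn):
    # mkdir -p: create the directory tree, tolerating an existing one
    try:
        os.makedirs(pn)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise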
Example #2
def test_concurrent_upload(metasync, opts):

    def _put(srv, path, remote_path):
        with open(path, "rb") as f:
            srv.put(remote_path, f.read())

    # generate test files, several per size
    tmpdir = os.path.join(opts.tmpdir, "metasync-files")
    sizes  = [1024, 2048, 4192, 8192, 1*MB]
    files  = []
    total_size = 1*MB

    print(tmpdir)

    util.mkdirs(tmpdir)
    for size in sizes:
        count = total_size // size
        fl = []
        for i in range(count):
            fn = "file-%s-%s" % (size, i)
            pn = os.path.join(tmpdir, fn)
            if not os.path.exists(pn):
                util.create_random_file(pn, size)
            fl.append(fn)
        files.append(fl)

    from metasyncAPI import ThreadPool
    from multiprocessing import cpu_count

    pool = ThreadPool(cpu_count())

    # upload each batch concurrently, one timing column per size
    result = [["Services"] + sizes]
    for cls in services.all_services:
        if cls in [services.DiskAPI]:
            continue
        row = [services.slug(cls)]
        srv = cls()
        if srv.exists('/concurrent_upload'):
            srv.rmdir('/concurrent_upload')
        srv.putdir('/concurrent_upload')
        print('uploading:', row[0])

        for fl in files:
            beg = time.time()
            for f in fl:
                path = os.path.join(tmpdir, f)
                remote_path = '/concurrent_upload/%s' % f
                pool.submit(srv.copy, _put, path, remote_path)
            pool.join()
            end = time.time()
            row.append(end - beg)

        result.append(row)

    # tabularize
    for row in result:
        for e in row:
            print "%s\t" % e,
        print
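The pool's semantics are only implied by the call site: pool.submit(srv.copy, _put, path, remote_path) suggests that submit(service, fn, *args) runs fn(service, *args) on a worker thread, with srv.copy supplying each job its own client instance. A minimal sketch under that assumption; the real ThreadPool lives in metasyncAPI and may differ:

import threading
import queue

class ThreadPool(object):
    # fixed pool of daemon workers draining a shared job queue
    def __init__(self, nworkers):
        self._jobs = queue.Queue()
        for _ in range(nworkers):
            t = threading.Thread(target=self._worker)
            t.daemon = True
            t.start()

    def _worker(self):
        while True:
            srv, fn, args = self._jobs.get()
            try:
                fn(srv, *args)
            finally:
                self._jobs.task_done()

    def submit(self, srv, fn, *args):
        # schedule fn(srv, *args) on a worker thread
        self._jobs.put((srv, fn, args))

    def join(self):
        # block until every submitted job has completed
        self._jobs.join()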
Example #3
def test_fetch(metasync, opts):
    "test fetching"

    clone = test_clone(metasync, opts)

    file_sizes = [1024, 2048]
    for size in file_sizes:
        pn = os.path.join(clone.path_root, "file-%s-2" % size)
        util.create_random_file(pn, size)
        clone.cmd_checkin(pn)
    pn = os.path.join(clone.path_root, "dir1")
    util.mkdirs(pn)
    clone.cmd_checkin(pn)
    pn = os.path.join(clone.path_root, "dir2")
    util.mkdirs(pn)
    pn = os.path.join(clone.path_root, "dir2", "file-1024")
    util.create_random_file(pn, 1024)
    pn = os.path.join(clone.path_root, "dir2")
    clone.cmd_checkin(pn)
    clone.cmd_push()
    root2 = clone.get_root_blob()
    metasync.cmd_fetch()
    metasync.cmd_update()
    root = metasync.get_root_blob()
    cnt = 0
    for _ in root.walk():
        cnt += 1
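    # 7 = the two files checked in by test_init (via test_clone) plus the
    # five entries added above (file-1024-2, file-2048-2, dir1, dir2,
    # dir2/file-1024), assuming the fast (non-slow) path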
    assert cnt == 7
Example #4
def test_bench_upload(metasync, opts):
    "bencmark upload speed of storage services"

    # generate test files
    tmpdir = os.path.join(opts.tmpdir, "metasync-files")
    sizes  = [1024, 2048, 1*MB]
    files  = []

    # for real bench
    if opts.slow:
        sizes = [10*MB, 100*MB]

    util.mkdirs(tmpdir)
    for size in sizes:
        fn = "file-%s" % size
        pn = os.path.join(tmpdir, fn)
        if not os.path.exists(pn):
            util.create_random_file(pn, size)
        files.append(fn)

    # try uploading each file
    result = [["Services"] + files]
    for cls in services.all_services:
        if cls in [services.DiskAPI]:
            continue
        if opts.slow and cls in [services.BaiduAPI]:
            continue
        row = [services.slug(cls)]
        srv = cls()
        print('uploading:', row[0])

        if srv.exists('/upload_test'):
            srv.rmdir('/upload_test')
        srv.putdir('/upload_test')

        for f in files:
            #if row[0] == 'baidu' and f == 'file-104857600':
            #    continue
            with open(os.path.join(tmpdir, f), 'rb') as fd:
                content = fd.read()
            beg = time.time()
            srv.put('/upload_test/' + f, content)
            end = time.time()
            row.append(end - beg)

        result.append(row)

    # tabularize
    for row in result:
        for e in row:
            print "%s\t" % e,
        print
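The rows collect raw wall-clock seconds. A hypothetical post-processing helper (not part of the original test) that turns a row into throughput figures, assuming sizes holds the byte counts matching the timing columns:

def to_throughput(sizes, row):
    # row[0] is the service slug; row[1:] are elapsed seconds per file
    return [row[0]] + ["%.2f MB/s" % (size / float(MB) / sec)
                       for size, sec in zip(sizes, row[1:])]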
Example #5
def test_merge(metasync, opts):
    clone = test_clone(metasync, opts)
    new_files = [3072, 4096]
    metasyncs = [metasync, clone]

    for i in range(2):
        dbg.info("checkin %d" % i)
        pn = os.path.join(metasyncs[i].path_root, "file-%s" % new_files[i]) 
        util.create_random_file(pn, new_files[i])
        metasyncs[i].cmd_checkin(pn)

    metasync.cmd_push()
    clone.cmd_fetch()
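    # the clone's head is now behind the pushed master, so its push must
    # fail until cmd_update merges in the fetched changes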
    assert not clone.cmd_push()
    clone.cmd_update()
    assert clone.cmd_push()
Example #6
def test_checkin_dir(metasync, opts):
    "test checkin with directory"

    test_init(metasync, opts)

    dst = os.path.join(metasync.path_root, "a/b")
    util.mkdirs(dst)
    pn = os.path.join(dst, "test-1024")
    util.create_random_file(pn, 1024)

    dst = os.path.join(metasync.path_root, "a")

    metasync.cmd_checkin(dst)
    metasync.cmd_push()

    test_clone(metasync, opts, False)
Example #7
def test_uptodate_master(metasync, opts):
    "check uptodate master"
    #XXX not yet done
    clone = test_clone(metasync, opts)
    assert metasync.get_next_version() == 2
    assert clone.get_next_version() == 2
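    # the initial push inside test_clone presumably wrote version 1,
    # leaving both replicas expecting to write version 2 next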
    assert metasync.get_uptodate_master() is not None

    file_sizes = [1024, 2048]
    for size in file_sizes:
        pn = os.path.join(clone.path_root, "file-%s-2" % size)
        util.create_random_file(pn, size)
        clone.cmd_checkin(pn)
    clone.cmd_push()

    master = metasync.get_uptodate_master() 
    metasync.cmd_fetch()
    metasync.cmd_update()
    assert master == metasync.get_prev_value()
Example #8
def test_init(metasync, opts):
    "test inititation"

    _init_disk_metasync(metasync, opts, 3, 2, opts.encrypt_key)
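    # the literals 3 and 2 above are presumably the number of backend
    # services and the replication factor; these snippets don't confirm it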

    # create/commit some files
    file_sizes = [1024, 2048]
    if opts.slow:
        # bigger one that splits by blob
        MB = 1024 * 1024
        file_sizes.append(33 * MB)

    for size in file_sizes:
        pn = os.path.join(opts.root, "file-%s" % size)
        util.create_random_file(pn, size)
        metasync.cmd_checkin(pn)

    metasync.cmd_push()

    root = metasync.get_root_blob()
    assert len(root.entries) == len(file_sizes)
Example #9
def test_blob_load(metasync, opts):
    "test loading file/dir from a path"

    _init_disk_metasync(metasync, opts)

    bs = blobs.BlobStore2(metasync)

    # /a/b/c
    dirp = metasync.get_local_path("a", "b", "c")
    util.mkdirs(dirp)

    # /a/b/c/file
    pn = os.path.join(dirp, "file")
    util.create_random_file(pn, 5*KB)

    blob = bs.load_dir(dirp)
    blob.add("file", bs.load_file(pn))

    # count how many blobs
    root = bs.get_root_blob()
    dbg.dbg("%-15s: %s" % ("/", root.hv))

    cnt = 0
    for (name, blob) in bs.walk():
        dbg.dbg("%-15s: %s" % (name, blob.hv))
        cnt += 1

    assert cnt == len(["a", "b", "c", "file"])

    # flush all new blobs
    assert len(os.listdir(metasync.path_objs)) == 0
    root.store()
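    # 6 objects flushed: the root, "a", "b", and "c" directory blobs, the
    # file blob, and (presumably) one content chunk for the 5KB file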
    assert len(os.listdir(metasync.path_objs)) == 6

    # "." => root
    test_blob = bs.load_dir(metasync.get_local_path("."))
    assert test_blob == root

    test_blob = bs.load_dir(metasync.get_local_path(""))
    assert test_blob == root
Example #10
def test_rm(metasync, opts):
    "test rm file"

    _init_disk_metasync(metasync, opts, 3, 2, opts.encrypt_key)

    # create/commit some files
    size = 512
    for i in range(5):
        pn = os.path.join(opts.root, "file-%s-%s" % (size, i))
        util.create_random_file(pn, size)
        metasync.cmd_checkin(pn)

    pn = os.path.join(opts.root, "a/b")
    util.mkdirs(pn)
    metasync.cmd_checkin(pn)
    metasync.cmd_push()

    pn = os.path.join(opts.root, "a/b/e")
    util.mkdirs(pn)

    # try to remove a non-existent directory
    pn = os.path.join(opts.root, "a/b/c/d")
    assert not metasync.cmd_rm(pn)

    pn = os.path.join(opts.root, "a/b/e/f")
    assert not metasync.cmd_rm(pn)

    # remove checked-in files and verify they are gone locally
    for i in range(3):
        pn = os.path.join(opts.root, "file-%s-%s" % (size, i))
        metasync.cmd_rm(pn)
        assert not os.path.exists(pn)

    metasync.cmd_rm(os.path.join(opts.root,"a/b"))

    metasync.cmd_push()