Exemplo n.º 1
0
def backend():
    """Yield a plain local backend rooted in a fresh temporary directory.

    The backend is closed and the directory removed on teardown, even if
    the test body raises.
    """
    storage_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    local_backend = local.Backend('local://' + storage_dir, None, None)
    try:
        yield local_backend
    finally:
        local_backend.close()
        shutil.rmtree(storage_dir)
Exemplo n.º 2
0
def backend():
    """Yield a zlib-compressed, encrypted backend over a temporary local store.

    Wraps a plain local backend in a ComprencBackend; both the wrapper and
    the temporary storage directory are cleaned up on teardown.
    """
    storage_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    raw_backend = local.Backend(
        Namespace(storage_url='local://' + storage_dir))
    wrapped = ComprencBackend(b'schnorz', ('zlib', 6), raw_backend)
    try:
        yield wrapped
    finally:
        wrapped.close()
        shutil.rmtree(storage_dir)
Exemplo n.º 3
0
def ctx():
    """Build a BlockCache test context over a plain local backend.

    Yields a Namespace carrying the backend pool, cache directory,
    metadata database, a pre-created test inode and a BlockCache whose
    upload/removal threads are replaced by synchronous stand-ins.
    All resources are released in the finally block.
    """
    ctx = Namespace()
    ctx.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    ctx.backend_pool = BackendPool(lambda: local.Backend(
        Namespace(storage_url='local://' + ctx.backend_dir)))

    ctx.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    ctx.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in tearDown()
    ctx.dbfile = tempfile.NamedTemporaryFile(delete=False)
    ctx.db = Connection(ctx.dbfile.name)
    create_tables(ctx.db)
    init_tables(ctx.db)

    # Create an inode we can work with
    ctx.inode = 42
    now_ns = time_ns()
    ctx.db.execute(
        "INSERT INTO inodes (id,mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
        "VALUES (?,?,?,?,?,?,?,?,?)",
        (ctx.inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
         | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
         os.getuid(), os.getgid(), now_ns, now_ns, now_ns, 1, 32))

    # Cache sized at 100 objects so normal tests don't trigger eviction.
    cache = BlockCache(ctx.backend_pool, ctx.db, ctx.cachedir + "/cache",
                       ctx.max_obj_size * 100)
    ctx.cache = cache

    # Monkeypatch around the need for removal and upload threads
    cache.to_remove = DummyQueue(cache)

    class DummyDistributor:
        # NOTE(review): the first parameter is named 'ctx' but plays the
        # role of 'self' -- likely a rename artifact; behavior unchanged.
        def put(ctx, arg, timeout=None):
            # Run the upload synchronously instead of queueing it.
            cache._do_upload(*arg)
            return True

    cache.to_upload = DummyDistributor()

    # Tested methods assume that they are called from
    # file system request handler
    s3ql.block_cache.lock = MockLock()
    s3ql.block_cache.lock_released = MockLock()

    try:
        yield ctx
    finally:
        # Reset backend_pool -- presumably a test may have replaced it;
        # destroy() needs the real pool. TODO confirm against callers.
        ctx.cache.backend_pool = ctx.backend_pool
        ctx.cache.destroy()
        shutil.rmtree(ctx.cachedir)
        shutil.rmtree(ctx.backend_dir)
        ctx.dbfile.close()
        os.unlink(ctx.dbfile.name)
Exemplo n.º 4
0
def ctx():
    """Build a full fs.Operations test context over an encrypted local backend.

    Yields a Namespace with the backend pool, a popped backend connection,
    block cache, metadata database and an initialized fs.Operations server.
    Cleanup runs in a finally block so the global llfuse lock is released
    and temporary files are removed even when the test body raises.
    """
    ctx = Namespace()
    ctx.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    plain_backend = local.Backend(Namespace(
        storage_url='local://' + ctx.backend_dir))
    ctx.backend_pool = BackendPool(lambda: ComprencBackend(b'schwubl', ('zlib', 6),
                                                           plain_backend))
    ctx.backend = ctx.backend_pool.pop_conn()
    ctx.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    ctx.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in the cleanup below.
    ctx.dbfile = tempfile.NamedTemporaryFile(delete=False)

    ctx.db = Connection(ctx.dbfile.name)
    create_tables(ctx.db)
    init_tables(ctx.db)

    # Tested methods assume that they are called from
    # file system request handler
    llfuse.lock.acquire()

    cache = BlockCache(ctx.backend_pool, ctx.db, ctx.cachedir + "/cache",
                       ctx.max_obj_size * 5)
    ctx.block_cache = cache
    ctx.server = fs.Operations(cache, ctx.db, ctx.max_obj_size,
                               InodeCache(ctx.db, 0))
    ctx.server.init()

    # Monkeypatch around the need for removal and upload threads
    cache.to_remove = DummyQueue(cache)

    class DummyDistributor:
        # Synchronous stand-in for the upload thread pool.
        def put(self, arg, timeout=None):
            cache._do_upload(*arg)
            return True
    cache.to_upload = DummyDistributor()

    # Keep track of unused filenames
    ctx.name_cnt = 0

    try:
        yield ctx
    finally:
        # Always tear down: without this, a failing test would leak the
        # global llfuse lock, the temp directories and the db file.
        ctx.server.inodes.destroy()
        llfuse.lock.release()
        ctx.block_cache.destroy()
        shutil.rmtree(ctx.cachedir)
        shutil.rmtree(ctx.backend_dir)
        # Close before unlinking (consistent with the other fixtures).
        ctx.dbfile.close()
        os.unlink(ctx.dbfile.name)
Exemplo n.º 5
0
async def ctx():
    """Async (trio) BlockCache test context over a plain local backend.

    Yields a Namespace with backend pool, cache directory, metadata
    database, a pre-created test inode and a BlockCache wired to a trio
    portal; the upload channel is replaced by a synchronous stand-in.
    Cleanup runs in the finally block.
    """
    ctx = Namespace()
    ctx.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    ctx.backend_pool = BackendPool(lambda: local.Backend(
        Namespace(storage_url='local://' + ctx.backend_dir)))

    ctx.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    ctx.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in tearDown()
    ctx.dbfile = tempfile.NamedTemporaryFile(delete=False)
    ctx.db = Connection(ctx.dbfile.name)
    create_tables(ctx.db)
    init_tables(ctx.db)

    # Create an inode we can work with
    ctx.inode = 42
    now_ns = time_ns()
    ctx.db.execute(
        "INSERT INTO inodes (id,mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
        "VALUES (?,?,?,?,?,?,?,?,?)",
        (ctx.inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
         | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
         os.getuid(), os.getgid(), now_ns, now_ns, now_ns, 1, 32))

    # Cache sized at 100 objects so normal tests don't trigger eviction.
    cache = BlockCache(ctx.backend_pool, ctx.db, ctx.cachedir + "/cache",
                       ctx.max_obj_size * 100)
    # Portal lets worker threads call back into the trio event loop.
    cache.portal = trio.BlockingTrioPortal()
    ctx.cache = cache

    # Monkeypatch around the need for removal and upload threads
    cache.to_remove = DummyQueue(cache)

    class DummyChannel:
        # Stand-in for the upload channel: perform the upload in a
        # worker thread immediately instead of queueing it.
        async def send(self, arg):
            await trio.run_sync_in_worker_thread(cache._do_upload, *arg)

    cache.to_upload = (DummyChannel(), None)

    try:
        yield ctx
    finally:
        # Reset backend_pool -- presumably a test may have replaced it;
        # destroy() needs the real pool. TODO confirm against callers.
        ctx.cache.backend_pool = ctx.backend_pool
        # NOTE(review): guards against 'destroy' being set to None --
        # presumably a test marks the cache as already destroyed this way;
        # verify against the test module.
        if ctx.cache.destroy is not None:
            await ctx.cache.destroy()
        shutil.rmtree(ctx.cachedir)
        shutil.rmtree(ctx.backend_dir)
        ctx.dbfile.close()
        os.unlink(ctx.dbfile.name)
Exemplo n.º 6
0
    def setUp(self):
        """Prepare a local backend, cache directory and metadata DB for fsck tests."""
        backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        cache_dir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.backend_dir = backend_dir
        self.backend = local.Backend('local://' + backend_dir, None, None)
        self.cachedir = cache_dir
        self.max_obj_size = 1024

        # Keep a named temp file: apsw opens the database by name.
        self.dbfile = tempfile.NamedTemporaryFile()
        db = Connection(self.dbfile.name)
        create_tables(db)
        init_tables(db)
        self.db = db

        # The fsck runs in these tests are expected to report errors.
        self.fsck = Fsck(
            cache_dir, self.backend, {'max_obj_size': self.max_obj_size}, db)
        self.fsck.expect_errors = True
Exemplo n.º 7
0
    def test_clear(self):
        """'s3qladm clear' must remove every object from the backend."""
        self.mkfs()

        argv = self.s3ql_cmd_argv('s3qladm') + [
            '--quiet', '--log', 'none', '--authfile', '/dev/null', 'clear',
            self.storage_url
        ]
        proc = subprocess.Popen(argv, stdin=subprocess.PIPE,
                                universal_newlines=True)
        # Answer the interactive confirmation prompt.
        print('yes', file=proc.stdin)
        proc.stdin.close()
        self.assertEqual(proc.wait(), 0)

        # After clearing, the storage must be completely empty.
        plain_backend = local.Backend(Namespace(storage_url=self.storage_url))
        assert list(plain_backend.list()) == []
Exemplo n.º 8
0
async def ctx():
    """Async (trio) fs.Operations test context over an encrypted backend.

    Yields a Namespace with the backend pool, a popped backend connection,
    block cache, metadata database and an initialized fs.Operations server.
    Cleanup runs in a finally block so resources are released even when
    the test body raises.
    """
    ctx = Namespace()
    ctx.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    plain_backend = local.Backend(
        Namespace(storage_url='local://' + ctx.backend_dir))
    ctx.backend_pool = BackendPool(
        lambda: ComprencBackend(b'schwubl', ('zlib', 6), plain_backend))
    ctx.backend = ctx.backend_pool.pop_conn()
    ctx.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    ctx.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in the cleanup below.
    ctx.dbfile = tempfile.NamedTemporaryFile(delete=False)

    ctx.db = Connection(ctx.dbfile.name)
    create_tables(ctx.db)
    init_tables(ctx.db)

    cache = BlockCache(ctx.backend_pool, ctx.db, ctx.cachedir + "/cache",
                       ctx.max_obj_size * 5)
    # Portal lets worker threads call back into the trio event loop.
    cache.portal = trio.BlockingTrioPortal()
    ctx.cache = cache
    ctx.server = fs.Operations(cache, ctx.db, ctx.max_obj_size,
                               InodeCache(ctx.db, 0))
    ctx.server.init()

    # Monkeypatch around the need for removal and upload threads
    cache.to_remove = DummyQueue(cache)

    class DummyChannel:
        # Stand-in for the upload channel: perform the upload in a
        # worker thread immediately instead of queueing it.
        async def send(self, arg):
            await trio.run_sync_in_worker_thread(cache._do_upload, *arg)

    cache.to_upload = (DummyChannel(), None)

    # Keep track of unused filenames
    ctx.name_cnt = 0

    try:
        yield ctx
    finally:
        # Always tear down: without this, a failing test would leak the
        # temp directories and the db file.
        ctx.server.inodes.destroy()
        await ctx.cache.destroy()
        shutil.rmtree(ctx.cachedir)
        shutil.rmtree(ctx.backend_dir)
        # Close before unlinking (consistent with the other fixtures).
        ctx.dbfile.close()
        os.unlink(ctx.dbfile.name)
Exemplo n.º 9
0
    def setUp(self):
        """Create a BlockCache test fixture over a plain local backend.

        Sets up temp storage/cache dirs, a metadata DB with one test
        inode, and a cache whose upload/removal threads are replaced
        by synchronous stand-ins. Acquires the global llfuse lock;
        presumably tearDown() releases it -- not visible here.
        """

        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        self.backend_pool = BackendPool(
            lambda: local.Backend('local://' + self.backend_dir, None, None))

        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        # (second-resolution mtime/atime/ctime columns in this schema)
        self.inode = 42
        self.db.execute(
            "INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?,?)",
            (self.inode,
             stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
             | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
             os.getuid(), os.getgid(), time.time(), time.time(), time.time(),
             1, 32))

        # Cache sized at 100 objects so normal tests don't trigger eviction.
        cache = BlockCache(self.backend_pool, self.db,
                           self.cachedir + "/cache", self.max_obj_size * 100)
        self.cache = cache

        # Monkeypatch around the need for removal and upload threads
        cache.to_remove = DummyQueue(cache)

        class DummyDistributor:
            # Synchronous stand-in for the upload thread pool.
            def put(self, arg, timeout=None):
                cache._do_upload(*arg)
                return True

        cache.to_upload = DummyDistributor()

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()
Exemplo n.º 10
0
    def setUp(self):
        """Create an fs.Operations test fixture over an encrypted backend.

        Sets up temp storage/cache dirs, a metadata DB, an fs.Operations
        server, and a block cache whose upload/removal threads are
        replaced by synchronous stand-ins. Acquires the global llfuse
        lock; presumably tearDown() releases it -- not visible here.
        """
        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        plain_backend = local.Backend('local://' + self.backend_dir, None,
                                      None)
        self.backend_pool = BackendPool(
            lambda: ComprencBackend(b'schwubl', ('zlib', 6), plain_backend))
        self.backend = self.backend_pool.pop_conn()
        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)

        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

        cache = BlockCache(self.backend_pool, self.db,
                           self.cachedir + "/cache", self.max_obj_size * 5)
        self.block_cache = cache
        self.server = fs.Operations(cache, self.db, self.max_obj_size,
                                    InodeCache(self.db, 0))
        self.server.init()

        # Monkeypatch around the need for removal and upload threads
        cache.to_remove = DummyQueue(cache)

        class DummyDistributor:
            # Synchronous stand-in for the upload thread pool.
            def put(self, arg, timeout=None):
                cache._do_upload(*arg)
                return True

        cache.to_upload = DummyDistributor()

        # Keep track of unused filenames
        self.name_cnt = 0
Exemplo n.º 11
0
File: t4_adm.py  Project: mkhon/s3ql
    def test_passphrase(self):
        """Changing the passphrase via 's3qladm passphrase' must take effect."""
        self.mkfs()

        new_pw = 'sd982jhd'

        argv = self.s3ql_cmd_argv('s3qladm') + [
            '--quiet', '--fatal-warnings', '--log', 'none',
            '--authfile', '/dev/null', 'passphrase', self.storage_url]
        proc = subprocess.Popen(argv, stdin=subprocess.PIPE,
                                universal_newlines=True)

        # Feed the old passphrase, then the new one twice (confirmation).
        for answer in (self.passphrase, new_pw, new_pw):
            print(answer, file=proc.stdin)
        proc.stdin.close()

        self.assertEqual(proc.wait(), 0)

        plain_backend = local.Backend(self.storage_url, None, None)
        backend = ComprencBackend(new_pw.encode(), ('zlib', 6), plain_backend)

        backend.fetch('s3ql_passphrase') # will fail with wrong pw
Exemplo n.º 12
0
    def test_key_recovery(self):
        """Verify that 's3qladm recover-key' restores access from the master key.

        Creates a file system, extracts the master key from mkfs output,
        deletes the stored passphrase object, confirms the metadata is no
        longer readable, then recovers with the master key and a new
        passphrase and checks the backend is accessible again.
        """
        mkfs_output = self.mkfs()

        # Pull the master key out of the mkfs output banner.
        hit = re.search(
            r'^---BEGIN MASTER KEY---\n'
            r'(.+)\n'
            r'---END MASTER KEY---$', mkfs_output, re.MULTILINE)
        assert hit
        master_key = hit.group(1)

        # Simulate loss of the stored passphrase object.
        plain_backend = local.Backend(Namespace(storage_url=self.storage_url))
        del plain_backend['s3ql_passphrase']  # Oops

        # Without s3ql_passphrase, metadata must be unreadable.
        backend = ComprencBackend(self.passphrase.encode(), ('zlib', 6),
                                  plain_backend)
        with pytest.raises(CorruptedObjectError):
            backend.fetch('s3ql_metadata')

        passphrase_new = 'sd982jhd'

        proc = subprocess.Popen(self.s3ql_cmd_argv('s3qladm') + [
            '--quiet', '--log', 'none', '--authfile', '/dev/null',
            'recover-key', self.storage_url
        ],
                                stdin=subprocess.PIPE,
                                universal_newlines=True)

        # Supply the master key, then the new passphrase twice (confirmation).
        print(master_key, file=proc.stdin)
        print(passphrase_new, file=proc.stdin)
        print(passphrase_new, file=proc.stdin)
        proc.stdin.close()
        self.assertEqual(proc.wait(), 0)

        backend = ComprencBackend(passphrase_new.encode(), ('zlib', 6),
                                  plain_backend)

        backend.fetch('s3ql_passphrase')  # will fail with wrong pw
Exemplo n.º 13
0
 def setUp(self):
     """Create a plain local backend plus the wrapped backend under test."""
     # Counter for generating unique object names during tests.
     self.name_cnt = 0
     self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
     self.plain_backend = local.Backend('local://' + self.backend_dir, None, None)
     # _wrap_backend() is not visible here -- presumably supplied by the
     # concrete test class to decorate plain_backend; verify in subclass.
     self.backend = self._wrap_backend()
     self.retries = 0