Example #1
def test_corrupted_meta(backend, monkeypatch):
    key = 'brafasel'
    value = b'hello there, let us see whats going on'
    backend[key] = value

    # Monkeypatch request handler to mess up metadata
    handler_class = mock_server.S3CRequestHandler

    def send_header(self,
                    keyword,
                    value,
                    count=[0],  # mutable default: shared call counter across invocations
                    send_header_real=handler_class.send_header):
        if keyword == self.hdr_prefix + 'Meta-md5':
            count[0] += 1
            if count[0] <= 3:
                value = value[::-1]
        return send_header_real(self, keyword, value)

    monkeypatch.setattr(handler_class, 'send_header', send_header)

    with catch_logmsg('^MD5 mismatch in metadata for',
                      count=1,
                      level=logging.WARNING):
        assert_raises(BadDigestError, backend.fetch, key)

    enable_temp_fail(backend)
    with catch_logmsg('^MD5 mismatch in metadata for',
                      count=2,
                      level=logging.WARNING):
        assert backend[key] == value
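
All of these tests lean on the test suite's catch_logmsg helper, whose definition is not shown on this page. A minimal sketch of the semantics the tests appear to rely on (an assumption, not s3ql's actual implementation): a context manager that counts log records at a given level whose raw format string matches a regex, and checks the expected count on exit.

import logging
import re
from contextlib import contextmanager

# Minimal sketch of catch_logmsg (assumed semantics, not s3ql's actual code):
# count records at `level` whose format string matches `pattern`, and check
# that exactly `count` such records were emitted inside the block.
@contextmanager
def catch_logmsg(pattern, count=None, level=logging.WARNING):
    hits = []

    class CountingHandler(logging.Handler):
        def emit(self, record):
            if record.levelno == level and re.search(pattern, str(record.msg)):
                hits.append(record)

    handler = CountingHandler()
    root = logging.getLogger()
    root.addHandler(handler)
    try:
        yield
    finally:
        root.removeHandler(handler)
        if count is not None:
            assert len(hits) == count, \
                'expected %d matching log records, got %d' % (count, len(hits))

Matching against record.msg (the unformatted string) would also explain why later examples pass patterns containing literal %s placeholders.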
Example #2
def test_retrieve(backend, db):
    plain_backend = backend
    backend = ComprencBackend(b'schnorz', ('zlib', 6), plain_backend)

    # Create a few objects in db
    obj_ids = (22, 25, 30, 31)
    for id_ in obj_ids:
        db.execute('INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)',
                   (id_, 1, 27 * id_))

    # Object one will be missing in backend

    # Object two will have a checksum error in the data
    key = 's3ql_data_%d' % obj_ids[1]
    backend[key] = b'some data that will be broken on a data check'
    (raw, meta) = plain_backend.fetch(key)
    raw = bytearray(raw)
    assert len(raw) > 20
    raw[-10:-6] = b'forg'
    plain_backend.store(key, raw, meta)

    # Object three will have a checksum error in the metadata
    key = 's3ql_data_%d' % obj_ids[2]
    backend.store(key, b'some data that will be broken on a metadata check',
                  { 'meta-key1': 'some textual data that just increases',
                    'meta-key2': 'the metadata size so that we can tamper with it' })
    meta = plain_backend.lookup(key)
    raw = bytearray(meta['data'])
    assert len(raw) > 20
    raw[-10:-6] = b'forg'
    meta['data'] = raw
    plain_backend.update_meta(key, meta)

    # Object four will be ok
    backend['s3ql_data_%d' % obj_ids[3]] = b'some data that is well'

    # When using a single thread, we can fake the backend factory
    def backend_factory():
        return backend

    missing_fh = io.StringIO()
    corrupted_fh = io.StringIO()

    with catch_logmsg('^Backend seems to have lost', count=1, level=logging.WARNING), \
         catch_logmsg('^Object %d is corrupted', count=1, level=logging.WARNING):
        verify.retrieve_objects(db, backend_factory, corrupted_fh, missing_fh,
                                thread_count=1, full=False)
    assert missing_fh.getvalue() == 's3ql_data_%d\n' % obj_ids[0]
    assert corrupted_fh.getvalue() == 's3ql_data_%d\n' % obj_ids[2]

    missing_fh = io.StringIO()
    corrupted_fh = io.StringIO()
    with catch_logmsg('^Backend seems to have lost', count=1, level=logging.WARNING), \
         catch_logmsg('^Object %d is corrupted', count=2, level=logging.WARNING):
        verify.retrieve_objects(db, backend_factory, corrupted_fh, missing_fh,
                                thread_count=1, full=True)
    assert missing_fh.getvalue() == 's3ql_data_%d\n' % obj_ids[0]
    assert corrupted_fh.getvalue() == ('s3ql_data_%d\n'*2) % obj_ids[1:3]
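
Note the asymmetry between the two verification passes: with full=False only the metadata is retrieved, so just object three (the metadata corruption) is reported, while full=True downloads the object data as well and additionally catches object two. The byte-flipping used to provoke both corruptions is the same pattern twice; distilled into a helper (hypothetical, built only from the fetch/store calls already shown above), it would be:

# Hypothetical helper capturing the tampering pattern above: flip a few bytes
# near the end of the stored representation so the checksum no longer matches
# on retrieval.
def corrupt_tail(plain_backend, key):
    (raw, meta) = plain_backend.fetch(key)
    raw = bytearray(raw)
    assert len(raw) > 20
    raw[-10:-6] = b'forg'
    plain_backend.store(key, raw, meta)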
Example #3
    def test_failsafe(self):
        len_ = self.max_obj_size
        data = self.random_data(len_)
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, data)
        self.server.cache.clear()
        self.assertTrue(self.server.failsafe is False)

        datafile = os.path.join(self.backend_dir, 's3ql_data_', 's3ql_data_1')
        shutil.copy(datafile, datafile + '.bak')

        # Modify contents
        with open(datafile, 'rb+') as rfh:
            rfh.seek(560)
            rfh.write(b'blrub!')
        with self.assertRaises(FUSEError) as cm:
            with catch_logmsg('^Backend returned malformed data for',
                              count=1,
                              level=logging.ERROR):
                self.server.read(fh, 0, len_)
        self.assertEqual(cm.exception.errno, errno.EIO)
        self.assertTrue(self.server.failsafe)

        # Restore contents, but should be marked as damaged now
        os.rename(datafile + '.bak', datafile)
        with self.assertRaises(FUSEError) as cm:
            self.server.read(fh, 0, len_)
        self.assertEqual(cm.exception.errno, errno.EIO)

        # Release and re-open, now we should be able to access again
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        # ...but not write access, since we are in failsafe mode
        with self.assertRaises(FUSEError) as cm:
            self.server.open(inode.id, os.O_RDWR)
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # ...read-only is fine.
        fh = self.server.open(inode.id, os.O_RDONLY)
        self.server.read(fh, 0, len_)

        # Remove completely, should give error after cache flush
        os.unlink(datafile)
        self.server.read(fh, 3, len_ // 2)
        self.server.cache.clear()
        with self.assertRaises(FUSEError) as cm:
            with catch_logmsg('^Backend lost block',
                              count=1,
                              level=logging.ERROR):
                self.server.read(fh, 5, len_ // 2)
        self.assertEqual(cm.exception.errno, errno.EIO)
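
The errno expectations encode the failsafe contract: after the first integrity error, the filesystem keeps serving reads but refuses to open files for writing. A hypothetical illustration of that gate (not s3ql's actual fs code; method and attribute names merely mirror the calls made by the test):

import errno
import os

from llfuse import FUSEError

class Server:                                   # fragment, hypothetical
    def open(self, inode_id, flags):
        # Once failsafe mode is active, reject any open() that requests
        # write access with EPERM; read-only opens still succeed.
        if self.failsafe and flags & (os.O_RDWR | os.O_WRONLY):
            raise FUSEError(errno.EPERM)
        return self._do_open(inode_id, flags)   # hypothetical delegate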
Example #4
def test_conn_abort(backend, monkeypatch):
    '''Close connection while sending data'''

    data = b'hello there, let us see whats going on'
    key = 'borg'
    backend[key] = data

    # Monkeypatch request handler
    handler_class = mock_server.S3CRequestHandler
    def send_data(self, data, count=[0]):
        count[0] += 1
        if count[0] >= 3:
            self.wfile.write(data)
        else:
            self.wfile.write(data[:len(data)//2])
            self.close_connection = True
    monkeypatch.setattr(handler_class, 'send_data', send_data)

    with pytest.raises(ConnectionClosed):
        with catch_logmsg("^Object closed prematurely, can't check MD5",
                          count=1, level=logging.WARNING):
            backend.fetch(key)

    enable_temp_fail(backend)
    assert backend[key] == data
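
enable_temp_fail is another helper from the test suite. What the tests require of it is that, afterwards, every backend exception counts as a temporary failure, so the retry machinery resends the request and the final fetch succeeds. A plausible sketch, assuming the backend classifies errors through an is_temp_failure predicate and that ComprencBackend wraps the raw backend in a .backend attribute:

from s3ql.backends.comprenc import ComprencBackend

# Sketch of enable_temp_fail's assumed behavior: classify every exception as
# temporary so the backend's retry logic papers over the injected failures.
def enable_temp_fail(backend):
    if isinstance(backend, ComprencBackend):    # unwrap (assumed attribute)
        backend = backend.backend
    backend.is_temp_failure = lambda exc: True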
Example #5
    def test_thread_hang(self):
        # Make sure that we don't deadlock if upload threads or removal
        # threads have died and we try to expire or terminate

        # Monkeypatch to avoid error messages about uncaught exceptions
        # in other threads
        upload_exc = False
        removal_exc = False
        def _upload_loop(*a, fn=self.cache._upload_loop):
            try:
                return fn(*a)
            except NotADirectoryError:
                nonlocal upload_exc
                upload_exc = True
        def _removal_loop(*a, fn=self.cache._removal_loop):
            try:
                return fn(*a)
            except NotADirectoryError:
                nonlocal removal_exc
                removal_exc = True
        self.cache._upload_loop = _upload_loop
        self.cache._removal_loop = _removal_loop

        # Start threads
        self.cache.init(threads=3)

        # Create first object (we'll try to remove that)
        with self.cache.get(self.inode, 0) as fh:
            fh.write(b'bar wurfz!')
        self.cache.commit()
        self.cache.wait()

        # Make sure that upload and removal will fail
        os.rename(self.backend_dir, self.backend_dir + '-tmp')
        open(self.backend_dir, 'w').close()

        # Create second object (we'll try to upload that)
        with self.cache.get(self.inode, 1) as fh:
            fh.write(b'bar wurfz number two!')

        # Schedule a removal
        self.cache.remove(self.inode, 0)

        try:
            # Try to clean up (implicitly calls expire)
            with llfuse.lock_released, \
                catch_logmsg('Unable to flush cache, no upload threads left alive',
                              level=logging.ERROR, count=1):
                with pytest.raises(OSError) as exc_info:
                    self.cache.destroy()
                assert exc_info.value.errno == errno.ENOTEMPTY
            assert upload_exc
            assert removal_exc
        finally:
            # Fix backend dir
            os.unlink(self.backend_dir)
            os.rename(self.backend_dir + '-tmp', self.backend_dir)

            # Remove objects from cache and make the final destroy
            # call a no-op.
            self.cache.remove(self.inode, 1)
            self.cache.destroy = lambda: None
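
The failure injection above exploits a POSIX detail: once the backend directory has been renamed away and a plain file created at its path, every open() below that path raises NotADirectoryError, which is exactly the exception the patched loops swallow. A self-contained demonstration (assuming a POSIX system):

import os
import tempfile

# Demonstrate the injection trick: shadow a directory path with a regular
# file, so that opening anything below it raises NotADirectoryError.
d = tempfile.mkdtemp()
os.rename(d, d + '-tmp')
open(d, 'w').close()            # a plain file now sits where the dir was
try:
    open(os.path.join(d, 'obj'), 'w')
except NotADirectoryError:
    print('paths below the shadowed directory now fail with ENOTDIR')
finally:
    os.unlink(d)                # remove the shadowing file
    os.rename(d + '-tmp', d)    # restore the directory
    os.rmdir(d)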
Example #6
def test_logging():
    inst = NthAttempt(6)
    with catch_logmsg(r'^Encountered %s \(%s\), retrying ',
                      count=2, level=logging.WARNING):
        inst.do_stuff()
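
The NthAttempt fixture is not shown on this page. The shape the test implies is a class whose do_stuff() raises a retryable error until the n-th call, wrapped in the retry decorator under test. A hedged sketch follows; the retry stand-in and the exception type are assumptions, and the real decorator evidently rate-limits the warning (the test expects only two warnings for six failures), whereas this stand-in warns on every failure:

import errno
import functools
import logging

log = logging.getLogger(__name__)

def retry(fn):
    # Minimal stand-in for the retry decorator under test (assumption):
    # retry on OSError, emitting the warning that test_logging asserts on.
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        while True:
            try:
                return fn(*args, **kwargs)
            except OSError as exc:
                log.warning('Encountered %s (%s), retrying ',
                            type(exc).__name__, exc)
    return wrapped

class NthAttempt:
    # do_stuff() keeps failing until the n-th call, so the retry wrapper
    # logs warnings before finally succeeding.
    def __init__(self, n=3):
        self.n = n
        self.count = 0

    @retry
    def do_stuff(self):
        if self.count >= self.n:
            return
        self.count += 1
        raise OSError(errno.ETIMEDOUT, 'simulated temporary failure')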
Example #7
def test_logging():
    inst = NthAttempt(6)
    with catch_logmsg('^Encountered %s exception',
                      count=2, level=logging.WARNING):
        inst.do_stuff()