Example #1
def test_corrupted_meta(backend, monkeypatch):
    key = 'brafasel'
    value = b'hello there, let us see whats going on'
    backend[key] = value

    # Monkeypatch request handler to mess up metadata
    handler_class = mock_server.S3CRequestHandler

    def send_header(self,
                    keyword,
                    value,
                    count=[0],
                    send_header_real=handler_class.send_header):
        if keyword == self.hdr_prefix + 'Meta-md5':
            count[0] += 1
            if count[0] <= 3:
                value = value[::-1]
        return send_header_real(self, keyword, value)

    monkeypatch.setattr(handler_class, 'send_header', send_header)

    with assert_logs('^MD5 mismatch in metadata for',
                     count=1,
                     level=logging.WARNING):
        assert_raises(BadDigestError, backend.fetch, key)

    enable_temp_fail(backend)
    with assert_logs('^MD5 mismatch in metadata for',
                     count=2,
                     level=logging.WARNING):
        assert backend[key] == value
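
A note on the pattern above: the count=[0] default argument in send_header is a deliberate idiom, not a bug. A mutable default is created once, at function definition time, so it persists across calls and serves as a cheap counter without any enclosing class or global. A minimal standalone sketch of the same trick (the names are illustrative, not from the test suite):

def flaky(value, count=[0]):
    # The list is created once, when the function is defined,
    # so mutations persist from one call to the next.
    count[0] += 1
    if count[0] <= 3:
        return value[::-1]  # corrupt the first three calls
    return value            # behave normally afterwards

assert flaky('abc') == 'cba'  # call 1: corrupted
assert flaky('abc') == 'cba'  # call 2: corrupted
assert flaky('abc') == 'cba'  # call 3: corrupted
assert flaky('abc') == 'abc'  # call 4: passes through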
Example #2
def test_retrieve(backend, db):
    plain_backend = backend
    backend = ComprencBackend(b'schnorz', ('zlib', 6), plain_backend)

    # Create a few objects in db
    obj_ids = (22, 25, 30, 31)
    for id_ in obj_ids:
        db.execute('INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)',
                   (id_, 1, 27 * id_))

    # Object one will be missing in backend

    # Object two will have a checksum error in the data
    key = 's3ql_data_%d' % obj_ids[1]
    backend[key] = b'some data that will be broken on a data check'
    (raw, meta) = plain_backend.fetch(key)
    raw = bytearray(raw)
    assert len(raw) > 20
    raw[-10:-6] = b'forg'
    plain_backend.store(key, raw, meta)

    # Object three will have a checksum error in the metadata
    key = 's3ql_data_%d' % obj_ids[2]
    backend.store(key, b'some data that will be broken on a metadata check',
                  { 'meta-key1': 'some textual data that just increases',
                    'meta-key2': 'the metadata size so that we can tamper with it' })
    meta = plain_backend.lookup(key)
    raw = bytearray(meta['data'])
    assert len(raw) > 20
    raw[-10:-6] = b'forg'
    meta['data'] = raw
    plain_backend.update_meta(key, meta)

    # Object four will be ok
    backend['s3ql_data_%d' % obj_ids[3]] = b'some data that is well'

    # When using a single thread, we can fake the backend factory
    def backend_factory():
        return backend

    missing_fh = io.StringIO()
    corrupted_fh = io.StringIO()

    with assert_logs('^Backend seems to have lost', count=1, level=logging.WARNING), \
         assert_logs('^Object %d is corrupted', count=1, level=logging.WARNING):
        verify.retrieve_objects(db, backend_factory, corrupted_fh, missing_fh,
                                thread_count=1, full=False)
    assert missing_fh.getvalue() == 's3ql_data_%d\n' % obj_ids[0]
    assert corrupted_fh.getvalue() == 's3ql_data_%d\n' % obj_ids[2]

    missing_fh = io.StringIO()
    corrupted_fh = io.StringIO()
    with assert_logs('^Backend seems to have lost', count=1, level=logging.WARNING), \
         assert_logs('^Object %d is corrupted', count=2, level=logging.WARNING):
        verify.retrieve_objects(db, backend_factory, corrupted_fh, missing_fh,
                                thread_count=1, full=True)
    assert missing_fh.getvalue() == 's3ql_data_%d\n' % obj_ids[0]
    assert corrupted_fh.getvalue() == ('s3ql_data_%d\n'*2) % obj_ids[1:3]
Example #3
async def test_failsafe(ctx):
    len_ = ctx.max_obj_size
    data = random_data(len_)
    (fi, inode) = await ctx.server.create(ROOT_INODE, newname(ctx),
                                          file_mode(), os.O_RDWR, some_ctx)
    fh = fi.fh
    await ctx.server.write(fh, 0, data)
    await ctx.cache.drop()
    assert not ctx.server.failsafe

    datafile = os.path.join(ctx.backend_dir, 's3ql_data_', 's3ql_data_1')
    shutil.copy(datafile, datafile + '.bak')

    # Modify contents
    with open(datafile, 'rb+') as rfh:
        rfh.seek(560)
        rfh.write(b'blrub!')
    with assert_raises(FUSEError) as cm:
        with assert_logs('^Backend returned malformed data for',
                          count=1, level=logging.ERROR):
            await ctx.server.read(fh, 0, len_)
    assert cm.value.errno == errno.EIO
    assert ctx.server.failsafe

    # Restore contents, but should be marked as damaged now
    os.rename(datafile + '.bak', datafile)
    with assert_raises(FUSEError) as cm:
        await ctx.server.read(fh, 0, len_)
    assert cm.value.errno == errno.EIO

    # Release and re-open, now we should be able to access again
    await ctx.server.release(fh)
    await ctx.server.forget([(inode.st_ino, 1)])

    # ...but not with write access, since we are in failsafe mode
    with assert_raises(FUSEError) as cm:
        await ctx.server.open(inode.st_ino, os.O_RDWR, some_ctx)
    assert cm.value.errno == errno.EPERM

    # ...read-only access is fine.
    fi = await ctx.server.open(inode.st_ino, os.O_RDONLY, some_ctx)
    fh = fi.fh
    await ctx.server.read(fh, 0, len_)

    # Remove completely, should give error after cache flush
    os.unlink(datafile)
    await ctx.server.read(fh, 3, len_//2)
    await ctx.cache.drop()
    with assert_raises(FUSEError) as cm:
        with assert_logs('^Backend lost block',
                          count=1, level=logging.ERROR):
            await ctx.server.read(fh, 5, len_//2)
    assert cm.value.errno == errno.EIO
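
Note that assert_logs and assert_raises are s3ql's own test helpers, not part of pytest. With stock pytest, roughly the same assertion can be written against the built-in caplog fixture; the function and logger names below are made up for illustration:

import logging
import re

def test_warns_with_caplog(caplog):
    log = logging.getLogger('demo')

    with caplog.at_level(logging.WARNING, logger='demo'):
        # Stand-in for the code under test.
        log.warning('MD5 mismatch in metadata for obj1')

    matches = [r for r in caplog.records
               if re.search('^MD5 mismatch in metadata for', r.getMessage())]
    assert len(matches) == 1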
Example #4
    def test_failsafe(self):
        len_ = self.max_obj_size
        data = self.random_data(len_)
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, some_ctx)
        self.server.write(fh, 0, data)
        self.server.cache.clear()
        self.assertTrue(self.server.failsafe is False)

        datafile = os.path.join(self.backend_dir, 's3ql_data_', 's3ql_data_1')
        shutil.copy(datafile, datafile + '.bak')

        # Modify contents
        with open(datafile, 'rb+') as rfh:
            rfh.seek(560)
            rfh.write(b'blrub!')
        with self.assertRaises(FUSEError) as cm:
            with assert_logs('^Backend returned malformed data for',
                             count=1,
                             level=logging.ERROR):
                self.server.read(fh, 0, len_)
        self.assertEqual(cm.exception.errno, errno.EIO)
        self.assertTrue(self.server.failsafe)

        # Restore contents, but should be marked as damaged now
        os.rename(datafile + '.bak', datafile)
        with self.assertRaises(FUSEError) as cm:
            self.server.read(fh, 0, len_)
        self.assertEqual(cm.exception.errno, errno.EIO)

        # Release and re-open, now we should be able to access again
        self.server.release(fh)
        self.server.forget([(inode.st_ino, 1)])

        # ...but not with write access, since we are in failsafe mode
        with self.assertRaises(FUSEError) as cm:
            self.server.open(inode.st_ino, os.O_RDWR, some_ctx)
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # ...read-only access is fine.
        fh = self.server.open(inode.st_ino, os.O_RDONLY, some_ctx)
        self.server.read(fh, 0, len_)

        # Remove completely, should give error after cache flush
        os.unlink(datafile)
        self.server.read(fh, 3, len_ // 2)
        self.server.cache.clear()
        with self.assertRaises(FUSEError) as cm:
            with assert_logs('^Backend lost block',
                             count=1,
                             level=logging.ERROR):
                self.server.read(fh, 5, len_ // 2)
        self.assertEqual(cm.exception.errno, errno.EIO)
Example #5
def test_failsafe(ctx):
    len_ = ctx.max_obj_size
    data = random_data(len_)
    (fh, inode) = ctx.server.create(ROOT_INODE, newname(ctx),
                                    file_mode(), os.O_RDWR, some_ctx)
    ctx.server.write(fh, 0, data)
    ctx.server.cache.drop()
    assert not ctx.server.failsafe

    datafile = os.path.join(ctx.backend_dir, 's3ql_data_', 's3ql_data_1')
    shutil.copy(datafile, datafile + '.bak')

    # Modify contents
    with open(datafile, 'rb+') as rfh:
        rfh.seek(560)
        rfh.write(b'blrub!')
    with assert_raises(FUSEError) as cm:
        with assert_logs('^Backend returned malformed data for',
                          count=1, level=logging.ERROR):
            ctx.server.read(fh, 0, len_)
    assert cm.value.errno == errno.EIO
    assert ctx.server.failsafe

    # Restore contents, but should be marked as damaged now
    os.rename(datafile + '.bak', datafile)
    with assert_raises(FUSEError) as cm:
        ctx.server.read(fh, 0, len_)
    assert cm.value.errno == errno.EIO

    # Release and re-open, now we should be able to access again
    ctx.server.release(fh)
    ctx.server.forget([(inode.st_ino, 1)])

    # ...but not with write access, since we are in failsafe mode
    with assert_raises(FUSEError) as cm:
        ctx.server.open(inode.st_ino, os.O_RDWR, some_ctx)
    assert cm.value.errno == errno.EPERM

    # ...read-only access is fine.
    fh = ctx.server.open(inode.st_ino, os.O_RDONLY, some_ctx)
    ctx.server.read(fh, 0, len_)

    # Remove completely, should give error after cache flush
    os.unlink(datafile)
    ctx.server.read(fh, 3, len_//2)
    ctx.server.cache.drop()
    with assert_raises(FUSEError) as cm:
        with assert_logs('^Backend lost block',
                          count=1, level=logging.ERROR):
            ctx.server.read(fh, 5, len_//2)
    assert cm.value.errno == errno.EIO
Example #7
def test_conn_abort(backend, monkeypatch):
    '''Close connection while sending data'''

    data = b'hello there, let us see whats going on'
    key = 'borg'
    backend[key] = data

    # Monkeypatch request handler
    handler_class = mock_server.S3CRequestHandler

    def send_data(self, data, count=[0]):
        count[0] += 1
        if count[0] >= 3:
            self.wfile.write(data)
        else:
            self.wfile.write(data[:len(data) // 2])
            self.close_connection = True

    monkeypatch.setattr(handler_class, 'send_data', send_data)

    with pytest.raises(ConnectionClosed):
        with assert_logs("^Object closed prematurely, can't check MD5",
                         count=1,
                         level=logging.WARNING):
            backend.fetch(key)

    enable_temp_fail(backend)
    assert backend[key] == data
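
The monkeypatching here targets the handler class, not an instance, so every handler the mock server spawns afterwards picks up the replacement, and pytest's monkeypatch fixture restores the original attribute when the test ends. A self-contained sketch of the mechanism with throwaway names:

import pytest

class Greeter:
    def greet(self):
        return 'hello'

def test_patch_class_method(monkeypatch):
    def fake_greet(self):
        return 'intercepted'

    # Replacing the attribute on the class affects all instances,
    # including ones created after the patch.
    monkeypatch.setattr(Greeter, 'greet', fake_greet)
    assert Greeter().greet() == 'intercepted'
    # On test exit, monkeypatch undoes the setattr automatically.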
Example #9
def test_missing(backend, db, full):
    # Create two objects, one will be missing
    obj_ids = (22, 25)
    for id_ in obj_ids:
        db.execute('INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)',
                   (id_, 1, 27 * id_))
    key = 's3ql_data_%d' % obj_ids[0]
    backend[key] = b'just some data that no-one really cares about'

    # When using a single thread, we can fake the backend factory
    backend_factory = lambda: backend

    missing_fh = io.StringIO()
    corrupted_fh = io.StringIO()
    with assert_logs('^Backend seems to have lost',
                     count=1,
                     level=logging.WARNING):
        verify.retrieve_objects(db,
                                backend_factory,
                                corrupted_fh,
                                missing_fh,
                                thread_count=1,
                                full=full)
    assert missing_fh.getvalue() == 's3ql_data_%d\n' % obj_ids[1]
    assert corrupted_fh.getvalue() == ''
Example #10
async def test_thread_hang(ctx):
    # Make sure that we don't deadlock if upload threads or removal
    # threads have died and we try to expire or terminate

    # Monkeypatch to avoid error messages about uncaught exceptions
    # in other threads
    upload_exc = False
    removal_exc = False
    def _upload_loop(*a, fn=ctx.cache._upload_loop):
        try:
            return fn(*a)
        except NotADirectoryError:
            nonlocal upload_exc
            upload_exc = True
    def _removal_loop_multi(*a, fn=ctx.cache._removal_loop_multi):
        try:
            return fn(*a)
        except NotADirectoryError:
            nonlocal removal_exc
            removal_exc = True
    ctx.cache._upload_loop = _upload_loop
    ctx.cache._removal_loop_multi = _removal_loop_multi

    # Start threads
    ctx.cache.init(threads=3)

    # Create first object (we'll try to remove that)
    async with ctx.cache.get(ctx.inode, 0) as fh:
        fh.write(b'bar wurfz!')
    await ctx.cache.start_flush()
    await ctx.cache.wait()

    # Make sure that upload and removal will fail
    os.rename(ctx.backend_dir, ctx.backend_dir + '-tmp')
    open(ctx.backend_dir, 'w').close()

    # Create second object (we'll try to upload that)
    async with ctx.cache.get(ctx.inode, 1) as fh:
        fh.write(b'bar wurfz number two!')

    # Schedule a removal
    await ctx.cache.remove(ctx.inode, 0)

    try:
        # Try to clean up (implicitly calls expire)
        with assert_logs('Unable to flush cache, no upload threads left alive',
                         level=logging.ERROR, count=1):
            await ctx.cache.destroy(keep_cache=True)
        assert upload_exc
        assert removal_exc
    finally:
        # Fix backend dir
        os.unlink(ctx.backend_dir)
        os.rename(ctx.backend_dir + '-tmp', ctx.backend_dir)

        # Remove objects from cache and make final destroy
        # call into no-op.
        await ctx.cache.remove(ctx.inode, 1)
        ctx.cache.destroy = None
Example #12
def test_corrupted_body(backend, db, full):
    obj_ids = (35, 40)
    for id_ in obj_ids:
        db.execute('INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)',
                   (id_, 1, 27 * id_))

    # Object one will be fine
    key = 's3ql_data_%d' % obj_ids[0]
    backend[key] = b'just some data that no-one really cares about'

    # Object two will have a checksum error in the body
    key = 's3ql_data_%d' % obj_ids[1]
    backend[key] = b'some data that will be broken on a data check'
    (raw, meta) = backend.backend.fetch(key)
    raw = bytearray(raw)
    assert len(raw) > 20
    raw[-10:-6] = b'forg'
    backend.backend.store(key, raw, meta)

    # When using a single thread, we can fake the backend factory
    backend_factory = lambda: backend

    missing_fh = io.StringIO()
    corrupted_fh = io.StringIO()

    if full:
        with assert_logs('^Object %d is corrupted',
                         count=1,
                         level=logging.WARNING):
            verify.retrieve_objects(db,
                                    backend_factory,
                                    corrupted_fh,
                                    missing_fh,
                                    thread_count=1,
                                    full=full)
        assert missing_fh.getvalue() == ''
        assert corrupted_fh.getvalue() == 's3ql_data_%d\n' % obj_ids[1]
    else:
        # Should not show up when looking just at HEAD
        verify.retrieve_objects(db,
                                backend_factory,
                                corrupted_fh,
                                missing_fh,
                                thread_count=1,
                                full=full)
        assert missing_fh.getvalue() == ''
        assert corrupted_fh.getvalue() == ''
Example #13
def test_corrupted_head(backend, db, full):
    obj_ids = (30, 31)
    for id_ in obj_ids:
        db.execute('INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)',
                   (id_, 1, 27 * id_))

    # Object one will be fine
    key = 's3ql_data_%d' % obj_ids[0]
    backend[key] = b'just some data that no-one really cares about'

    # Object two will have a checksum error in the metadata
    key = 's3ql_data_%d' % obj_ids[1]
    backend.store(
        key, b'some data that will be broken on a metadata check', {
            'meta-key1': 'some textual data that just increases',
            'meta-key2': 'the metadata size so that we can tamper with it'
        })
    meta = backend.backend.lookup(key)
    raw = bytearray(meta['data'])
    assert len(raw) > 20
    raw[-10:-6] = b'forg'
    meta['data'] = raw
    backend.backend.update_meta(key, meta)

    # When using a single thread, we can fake the backend factory
    backend_factory = lambda: backend

    missing_fh = io.StringIO()
    corrupted_fh = io.StringIO()
    with assert_logs('^Object %d is corrupted', count=1,
                     level=logging.WARNING):
        verify.retrieve_objects(db,
                                backend_factory,
                                corrupted_fh,
                                missing_fh,
                                thread_count=1,
                                full=full)
    assert missing_fh.getvalue() == ''
    assert corrupted_fh.getvalue() == 's3ql_data_%d\n' % obj_ids[1]
Example #14
    def test_thread_hang(self):
        # Make sure that we don't deadlock if upload threads or removal
        # threads have died and we try to expire or terminate

        # Monkeypatch to avoid error messages about uncaught exceptions
        # in other threads
        upload_exc = False
        removal_exc = False
        def _upload_loop(*a, fn=self.cache._upload_loop):
            try:
                return fn(*a)
            except NotADirectoryError:
                nonlocal upload_exc
                upload_exc = True
        def _removal_loop(*a, fn=self.cache._removal_loop):
            try:
                return fn(*a)
            except NotADirectoryError:
                nonlocal removal_exc
                removal_exc = True
        self.cache._upload_loop = _upload_loop
        self.cache._removal_loop = _removal_loop

        # Start threads
        self.cache.init(threads=3)

        # Create first object (we'll try to remove that)
        with self.cache.get(self.inode, 0) as fh:
            fh.write(b'bar wurfz!')
        self.cache.commit()
        self.cache.wait()

        # Make sure that upload and removal will fail
        os.rename(self.backend_dir, self.backend_dir + '-tmp')
        open(self.backend_dir, 'w').close()

        # Create second object (we'll try to upload that)
        with self.cache.get(self.inode, 1) as fh:
            fh.write(b'bar wurfz number two!')

        # Schedule a removal
        self.cache.remove(self.inode, 0)

        try:
            # Try to clean up (implicitly calls expire)
            with llfuse.lock_released, \
                 assert_logs('Unable to flush cache, no upload threads left alive',
                             level=logging.ERROR, count=1):
                with pytest.raises(OSError) as exc_info:
                    self.cache.destroy()
                assert exc_info.value.errno == errno.ENOTEMPTY
            assert upload_exc
            assert removal_exc
        finally:
            # Fix backend dir
            os.unlink(self.backend_dir)
            os.rename(self.backend_dir + '-tmp', self.backend_dir)

            # Remove objects from cache and make final destroy
            # call into no-op.
            self.cache.remove(self.inode, 1)
            self.cache.destroy = lambda: None
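
The fn=self.cache._upload_loop default argument in the wrappers above does real work: it captures the original method before the attribute is reassigned, so the wrapper can delegate to it without recursing into itself. A reduced sketch of the binding trick (the Worker class is invented for illustration):

class Worker:
    def step(self):
        raise NotADirectoryError('backend dir was replaced by a file')

w = Worker()
failed = False

# The original bound method is captured as a default argument *now*,
# so the wrapper can still reach it after w.step has been reassigned.
def quiet_step(*a, fn=w.step):
    global failed
    try:
        return fn(*a)
    except NotADirectoryError:
        failed = True

w.step = quiet_step
w.step()
assert failed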
Example #15
    def test_thread_hang(self):
        # Make sure that we don't deadlock if upload threads or removal
        # threads have died and we try to expire or terminate

        # Monkeypatch to avoid error messages about uncaught exceptions
        # in other threads
        upload_exc = False
        removal_exc = False

        def _upload_loop(*a, fn=self.cache._upload_loop):
            try:
                return fn(*a)
            except NotADirectoryError:
                nonlocal upload_exc
                upload_exc = True

        def _removal_loop(*a, fn=self.cache._removal_loop):
            try:
                return fn(*a)
            except NotADirectoryError:
                nonlocal removal_exc
                removal_exc = True

        self.cache._upload_loop = _upload_loop
        self.cache._removal_loop = _removal_loop

        # Start threads
        self.cache.init(threads=3)

        # Create first object (we'll try to remove that)
        with self.cache.get(self.inode, 0) as fh:
            fh.write(b'bar wurfz!')
        self.cache.start_flush()
        self.cache.wait()

        # Make sure that upload and removal will fail
        os.rename(self.backend_dir, self.backend_dir + '-tmp')
        open(self.backend_dir, 'w').close()

        # Create second object (we'll try to upload that)
        with self.cache.get(self.inode, 1) as fh:
            fh.write(b'bar wurfz number two!')

        # Schedule a removal
        self.cache.remove(self.inode, 0)

        try:
            # Try to clean up (implicitly calls expire)
            with assert_logs(
                    'Unable to drop cache, no upload threads left alive',
                    level=logging.ERROR,
                    count=1):
                with pytest.raises(OSError) as exc_info:
                    self.cache.destroy()
                assert exc_info.value.errno == errno.ENOTEMPTY
            assert upload_exc
            assert removal_exc
        finally:
            # Fix backend dir
            os.unlink(self.backend_dir)
            os.rename(self.backend_dir + '-tmp', self.backend_dir)

            # Remove objects from cache and make final destroy
            # call into no-op.
            self.cache.remove(self.inode, 1)
            self.cache.destroy = lambda: None
Example #16
def test_logging():
    inst = NthAttempt(6)
    with assert_logs(r'^Encountered %s \(%s\), retrying ',
                     count=2,
                     level=logging.WARNING):
        inst.do_stuff()
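
None of these examples show assert_logs itself; it lives in the s3ql test suite. A plausible minimal reconstruction, assuming it counts records at a given level whose message matches a regex, is a context manager built on a temporary logging.Handler:

import logging
import re
from contextlib import contextmanager

@contextmanager
def assert_logs_sketch(pattern, count, level):
    # Attach a handler to the root logger so records emitted anywhere
    # in the logger hierarchy propagate into it while the block runs.
    hits = []

    class _Catcher(logging.Handler):
        def emit(self, record):
            if record.levelno == level and re.search(pattern, record.getMessage()):
                hits.append(record)

    handler = _Catcher(level)
    root = logging.getLogger()
    root.addHandler(handler)
    try:
        yield
    finally:
        root.removeHandler(handler)
    assert len(hits) == count, \
        'expected %d matching records, got %d' % (count, len(hits))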