Example #1
    def test_delete_db(self):
        def init_stub(conn, put_timestamp):
            conn.execute('CREATE TABLE test (one TEXT)')
            conn.execute('CREATE TABLE test_stat (id TEXT)')
            conn.execute('INSERT INTO test_stat (id) VALUES (?)',
                         (str(uuid4()),))
            conn.execute('INSERT INTO test (one) VALUES ("1")')
            conn.commit()
        stub_called = [False]

        def delete_stub(*a, **kw):
            stub_called[0] = True
        broker = DatabaseBroker(':memory:')
        broker.db_type = 'test'
        broker._initialize = init_stub
        # Initializes a good broker for us
        broker.initialize(normalize_timestamp('1'))
        self.assert_(broker.conn is not None)
        broker._delete_db = delete_stub
        stub_called[0] = False
        broker.delete_db('2')
        self.assert_(stub_called[0])
        broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
        broker.db_type = 'test'
        broker._initialize = init_stub
        broker.initialize(normalize_timestamp('1'))
        broker._delete_db = delete_stub
        stub_called[0] = False
        broker.delete_db('2')
        self.assert_(stub_called[0])
        # ensure that metadata was cleared
        m2 = broker.metadata
        self.assert_(not any(v[0] for v in m2.itervalues()))
        self.assert_(all(v[1] == normalize_timestamp('2')
                         for v in m2.itervalues()))
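Every example below leans on the same helper. A minimal sketch of it, consistent with the fixed-width timestamps these tests assert (Swift's real implementation lives in swift.common.utils):

    # Render any epoch timestamp as a zero-padded, fixed-width string with
    # five decimal places, so lexicographic order matches numeric order.
    def normalize_timestamp(timestamp):
        return '%016.05f' % float(timestamp)

    # e.g. normalize_timestamp('1') == '0000000001.00000'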
Example #2
    def test_reap_delay(self):
        time_value = [100]

        def _time():
            return time_value[0]

        time_orig = reaper.time
        try:
            reaper.time = _time
            r = reaper.AccountReaper({"delay_reaping": "10"})
            b = FakeBroker()
            b.info["delete_timestamp"] = normalize_timestamp(110)
            self.assertFalse(r.reap_account(b, 0, None))
            b.info["delete_timestamp"] = normalize_timestamp(100)
            self.assertFalse(r.reap_account(b, 0, None))
            b.info["delete_timestamp"] = normalize_timestamp(90)
            self.assertFalse(r.reap_account(b, 0, None))
            # KeyError raised immediately as reap_account tries to get the
            # account's name to do the reaping.
            b.info["delete_timestamp"] = normalize_timestamp(89)
            self.assertRaises(KeyError, r.reap_account, b, 0, None)
            b.info["delete_timestamp"] = normalize_timestamp(1)
            self.assertRaises(KeyError, r.reap_account, b, 0, None)
        finally:
            reaper.time = time_orig
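The delay logic this test exercises reduces to a simple age check. A sketch under that reading (simplified from AccountReaper.reap_account; once past the delay the reaper reads the account name from broker info, which is the KeyError the FakeBroker triggers above):

    # Reaping is skipped until the delete timestamp is more than
    # delay_reaping seconds in the past.
    def past_reap_delay(info, delay_reaping, now):
        return now - float(info['delete_timestamp']) > delay_reaping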
Example #3
    def test_hash_cleanup_listdir(self):
        file_list = []

        def mock_listdir(path):
            return list(file_list)

        def mock_unlink(path):
            file_list.remove(os.path.basename(path))

        with unit_mock({"os.listdir": mock_listdir, "os.unlink": mock_unlink}):
            # purge .data if there's a newer .ts
            file1 = normalize_timestamp(time()) + ".data"
            file2 = normalize_timestamp(time() + 1) + ".ts"
            file_list = [file1, file2]
            self.assertEquals(diskfile.hash_cleanup_listdir("/whatever"), [file2])

            # purge .ts if there's a newer .data
            file1 = normalize_timestamp(time()) + ".ts"
            file2 = normalize_timestamp(time() + 1) + ".data"
            file_list = [file1, file2]
            self.assertEquals(diskfile.hash_cleanup_listdir("/whatever"), [file2])

            # keep .meta and .data if meta newer than data
            file1 = normalize_timestamp(time()) + ".ts"
            file2 = normalize_timestamp(time() + 1) + ".data"
            file3 = normalize_timestamp(time() + 2) + ".meta"
            file_list = [file1, file2, file3]
            self.assertEquals(diskfile.hash_cleanup_listdir("/whatever"), [file3, file2])

            # keep only latest of multiple .ts files
            file1 = normalize_timestamp(time()) + ".ts"
            file2 = normalize_timestamp(time() + 1) + ".ts"
            file3 = normalize_timestamp(time() + 2) + ".ts"
            file_list = [file1, file2, file3]
            self.assertEquals(diskfile.hash_cleanup_listdir("/whatever"), [file3])
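The expectations above pin down the cleanup policy: the newest .data or .ts wins, and a .meta survives only when it is newer than the surviving .data. A simplified sketch of just the keep/purge decision (the real hash_cleanup_listdir also unlinks the losers):

    def surviving_files(file_list):
        files = sorted(file_list, reverse=True)    # newest timestamp first
        survivors = []
        for f in files:
            if f.endswith(('.ts', '.data')):
                survivors.append(f)
                break                              # everything older is purged
            if f.endswith('.meta') and not survivors:
                survivors.append(f)                # keep at most one newer .meta
        return survivors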
Example #4
    def test_unicode(self):
        cu = container_updater.ContainerUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15',
        })
        containers_dir = os.path.join(self.sda1, container_server.DATADIR)
        os.mkdir(containers_dir)
        subdir = os.path.join(containers_dir, 'subdir')
        os.mkdir(subdir)
        cb = ContainerBroker(os.path.join(subdir, 'hash.db'),
                             account='a',
                             container='\xce\xa9')
        cb.initialize(normalize_timestamp(1))
        cb.put_object('\xce\xa9', normalize_timestamp(2), 3, 'text/plain',
                      '68b329da9893e34099c7d8ad5cb9c940')

        def accept(sock, addr):
            try:
                with Timeout(3):
                    inc = sock.makefile('rb')
                    out = sock.makefile('wb')
                    out.write('HTTP/1.1 201 OK\r\nContent-Length: 0\r\n\r\n')
                    out.flush()
                    inc.read()
            except BaseException as err:
                import traceback
                traceback.print_exc()
                return err
            return None

        bindsock = listen(('127.0.0.1', 0))

        def spawn_accepts():
            events = []
            for _junk in xrange(2):
                with Timeout(3):
                    sock, addr = bindsock.accept()
                    events.append(spawn(accept, sock, addr))
            return events

        spawned = spawn(spawn_accepts)
        for dev in cu.get_account_ring().devs:
            if dev is not None:
                dev['port'] = bindsock.getsockname()[1]
        cu.run_once()
        for event in spawned.wait():
            err = event.wait()
            if err:
                raise err
        info = cb.get_info()
        self.assertEquals(info['object_count'], 1)
        self.assertEquals(info['bytes_used'], 3)
        self.assertEquals(info['reported_object_count'], 1)
        self.assertEquals(info['reported_bytes_used'], 3)
Example #5
 def PUT(self, req):
     """Handle HTTP PUT request."""
     drive, part, account, container = split_and_validate_path(req, 3, 4)
     if self.mount_check and not check_mount(self.root, drive):
         return HTTPInsufficientStorage(drive=drive, request=req)
     if container:   # put account container
         pending_timeout = None
         if 'x-trans-id' in req.headers:
             pending_timeout = 3
         broker = self._get_account_broker(drive, part, account,
                                           pending_timeout=pending_timeout)
         if account.startswith(self.auto_create_account_prefix) and \
                 not os.path.exists(broker.db_file):
             try:
                 broker.initialize(normalize_timestamp(
                     req.headers.get('x-timestamp') or time.time()))
             except DatabaseAlreadyExists:
                 pass
         if req.headers.get('x-account-override-deleted', 'no').lower() != \
                 'yes' and broker.is_deleted():
             return HTTPNotFound(request=req)
         broker.put_container(container, req.headers['x-put-timestamp'],
                              req.headers['x-delete-timestamp'],
                              req.headers['x-object-count'],
                              req.headers['x-bytes-used'])
         if req.headers['x-delete-timestamp'] > \
                 req.headers['x-put-timestamp']:
             return HTTPNoContent(request=req)
         else:
             return HTTPCreated(request=req)
     else:   # put account
         broker = self._get_account_broker(drive, part, account)
         timestamp = normalize_timestamp(req.headers['x-timestamp'])
         if not os.path.exists(broker.db_file):
             try:
                 broker.initialize(timestamp)
                 created = True
             except DatabaseAlreadyExists:
                 # another request created the DB concurrently; record
                 # that this request did not create it so `created` is
                 # always bound below
                 created = False
         elif broker.is_status_deleted():
             return self._deleted_response(broker, req, HTTPForbidden,
                                           body='Recently deleted')
         else:
             created = broker.is_deleted()
             broker.update_put_timestamp(timestamp)
             if broker.is_deleted():
                 return HTTPConflict(request=req)
         metadata = {}
         metadata.update((key, (value, timestamp))
                         for key, value in req.headers.iteritems()
                         if key.lower().startswith('x-account-meta-'))
         if metadata:
             broker.update_metadata(metadata)
         if created:
             return HTTPCreated(request=req)
         else:
             return HTTPAccepted(request=req)
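For orientation, a hedged sketch of a request hitting the container branch above (Request.blank is swob's test constructor; the header values are made up, and `app` stands in for the account server WSGI app):

    from swift.common.swob import Request

    req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                        headers={'X-Put-Timestamp': normalize_timestamp(1),
                                 'X-Delete-Timestamp': normalize_timestamp(0),
                                 'X-Object-Count': '0',
                                 'X-Bytes-Used': '0',
                                 'X-Trans-Id': 'tx1'})
    resp = req.get_response(app)   # 201 Created for a live container record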
Example #6
 def test_empty(self):
     # Test AccountBroker.empty
     broker = AccountBroker(':memory:', account='a')
     broker.initialize(normalize_timestamp('1'))
     self.assert_(broker.empty())
     broker.put_container('o', normalize_timestamp(time()), 0, 0, 0)
     self.assert_(not broker.empty())
     sleep(.00001)
     broker.put_container('o', 0, normalize_timestamp(time()), 0, 0)
     self.assert_(broker.empty())
Example #7
 def test_reclaim(self):
     broker = AccountBroker(':memory:', account='test_account')
     broker.initialize(normalize_timestamp('1'))
     broker.put_container('c', normalize_timestamp(time()), 0, 0, 0)
     with broker.get() as conn:
         self.assertEquals(
             conn.execute("SELECT count(*) FROM container "
                          "WHERE deleted = 0").fetchone()[0], 1)
         self.assertEquals(
             conn.execute("SELECT count(*) FROM container "
                          "WHERE deleted = 1").fetchone()[0], 0)
     broker.reclaim(normalize_timestamp(time() - 999), time())
     with broker.get() as conn:
         self.assertEquals(
             conn.execute("SELECT count(*) FROM container "
                          "WHERE deleted = 0").fetchone()[0], 1)
         self.assertEquals(
             conn.execute("SELECT count(*) FROM container "
                          "WHERE deleted = 1").fetchone()[0], 0)
     sleep(.00001)
     broker.put_container('c', 0, normalize_timestamp(time()), 0, 0)
     with broker.get() as conn:
         self.assertEquals(
             conn.execute("SELECT count(*) FROM container "
                          "WHERE deleted = 0").fetchone()[0], 0)
         self.assertEquals(
             conn.execute("SELECT count(*) FROM container "
                          "WHERE deleted = 1").fetchone()[0], 1)
     broker.reclaim(normalize_timestamp(time() - 999), time())
     with broker.get() as conn:
         self.assertEquals(
             conn.execute("SELECT count(*) FROM container "
                          "WHERE deleted = 0").fetchone()[0], 0)
         self.assertEquals(
             conn.execute("SELECT count(*) FROM container "
                          "WHERE deleted = 1").fetchone()[0], 1)
     sleep(.00001)
     broker.reclaim(normalize_timestamp(time()), time())
     with broker.get() as conn:
         self.assertEquals(
             conn.execute("SELECT count(*) FROM container "
                          "WHERE deleted = 0").fetchone()[0], 0)
         self.assertEquals(
             conn.execute("SELECT count(*) FROM container "
                          "WHERE deleted = 1").fetchone()[0], 0)
     # Test reclaim after deletion. Create 3 test containers
     broker.put_container('x', 0, 0, 0, 0)
     broker.put_container('y', 0, 0, 0, 0)
     broker.put_container('z', 0, 0, 0, 0)
     broker.reclaim(normalize_timestamp(time()), time())
     # self.assertEquals(len(res), 2)
     # self.assert_(isinstance(res, tuple))
     # containers, account_name = res
     # self.assert_(containers is None)
     # self.assert_(account_name is None)
     # Now delete the account
     broker.delete_db(normalize_timestamp(time()))
     broker.reclaim(normalize_timestamp(time()), time())
Example #8
 def test_report_up_to_date(self):
     repl = replicator.ContainerReplicator({})
     info = {'put_timestamp': normalize_timestamp(1),
             'delete_timestamp': normalize_timestamp(0),
             'object_count': 0,
             'bytes_used': 0,
             'reported_put_timestamp': normalize_timestamp(1),
             'reported_delete_timestamp': normalize_timestamp(0),
             'reported_object_count': 0,
             'reported_bytes_used': 0}
     self.assertTrue(repl.report_up_to_date(info))
     info['delete_timestamp'] = normalize_timestamp(2)
     self.assertFalse(repl.report_up_to_date(info))
     info['reported_delete_timestamp'] = normalize_timestamp(2)
     self.assertTrue(repl.report_up_to_date(info))
     info['object_count'] = 1
     self.assertFalse(repl.report_up_to_date(info))
     info['reported_object_count'] = 1
     self.assertTrue(repl.report_up_to_date(info))
     info['bytes_used'] = 1
     self.assertFalse(repl.report_up_to_date(info))
     info['reported_bytes_used'] = 1
     self.assertTrue(repl.report_up_to_date(info))
     info['put_timestamp'] = normalize_timestamp(3)
     self.assertFalse(repl.report_up_to_date(info))
     info['reported_put_timestamp'] = normalize_timestamp(3)
     self.assertTrue(repl.report_up_to_date(info))
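The assertions pin the comparison down; a sketch that matches the behaviour tested above (not necessarily Swift's exact code):

    # The report is current only when every stat equals its reported_*
    # counterpart.
    def report_up_to_date(info):
        return all(info[key] == info['reported_' + key]
                   for key in ('put_timestamp', 'delete_timestamp',
                               'object_count', 'bytes_used'))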
Example #9
    def test_merge_syncs(self):
        broker = DatabaseBroker(':memory:')

        def stub(*args, **kwargs):
            pass
        broker._initialize = stub
        broker.initialize(normalize_timestamp('1'))
        uuid2 = str(uuid4())
        broker.merge_syncs([{'sync_point': 1, 'remote_id': uuid2}])
        self.assertEquals(broker.get_sync(uuid2), 1)
        uuid3 = str(uuid4())
        broker.merge_syncs([{'sync_point': 2, 'remote_id': uuid3}])
        self.assertEquals(broker.get_sync(uuid2), 1)
        self.assertEquals(broker.get_sync(uuid3), 2)
        self.assertEquals(broker.get_sync(uuid2, incoming=False), -1)
        self.assertEquals(broker.get_sync(uuid3, incoming=False), -1)
        broker.merge_syncs([{'sync_point': 3, 'remote_id': uuid2},
                            {'sync_point': 4, 'remote_id': uuid3}],
                           incoming=False)
        self.assertEquals(broker.get_sync(uuid2, incoming=False), 3)
        self.assertEquals(broker.get_sync(uuid3, incoming=False), 4)
        self.assertEquals(broker.get_sync(uuid2), 1)
        self.assertEquals(broker.get_sync(uuid3), 2)
        broker.merge_syncs([{'sync_point': 5, 'remote_id': uuid2}])
        self.assertEquals(broker.get_sync(uuid2), 5)
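A toy model of the semantics asserted above, assuming only what the test shows (Swift's real broker persists these points in SQL tables):

    class SyncPoints(object):
        # sync points are tracked per remote_id, separately per direction
        def __init__(self):
            self.points = {True: {}, False: {}}    # keyed by `incoming`

        def merge_syncs(self, sync_points, incoming=True):
            for item in sync_points:
                self.points[incoming][item['remote_id']] = item['sync_point']

        def get_sync(self, remote_id, incoming=True):
            return self.points[incoming].get(remote_id, -1)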
Example #10
 def test_delete_partition_with_handoff_delete(self):
     with mock.patch('swift.obj.replicator.http_connect',
                     mock_http_connect(200)):
         self.replicator.handoff_delete = 2
         df = diskfile.DiskFile(self.devices, 'sda', '1', 'a', 'c', 'o',
                                FakeLogger())
         mkdirs(df.datadir)
         print df.datadir
         f = open(
             os.path.join(df.datadir,
                          normalize_timestamp(time.time()) + '.data'), 'wb')
         f.write('1234567890')
         f.close()
         ohash = hash_path('a', 'c', 'o')
         data_dir = ohash[-3:]
         whole_path_from = os.path.join(self.objects, '1', data_dir)
         part_path = os.path.join(self.objects, '1')
         self.assertTrue(os.access(part_path, os.F_OK))
         nodes = [
             node for node in self.ring.get_part_nodes(1)
             if node['ip'] not in _ips()
         ]
         process_arg_checker = []
         for i, node in enumerate(nodes):
             rsync_mod = '%s::object/sda/objects/%s' % (node['ip'], 1)
             if i == 0:
                 # force one of the rsync calls to fail
                 ret_code = 1
             else:
                 ret_code = 0
             process_arg_checker.append(
                 (ret_code, '', ['rsync', whole_path_from, rsync_mod]))
         with _mock_process(process_arg_checker):
             self.replicator.replicate()
         self.assertFalse(os.access(part_path, os.F_OK))
Example #11
    def test_run_once(self):
        replicator = object_replicator.ObjectReplicator(
            dict(swift_dir=self.testdir,
                 devices=self.devices,
                 mount_check='false',
                 timeout='300',
                 stats_interval='1'))
        was_connector = object_replicator.http_connect
        object_replicator.http_connect = mock_http_connect(200)
        cur_part = '0'
        df = diskfile.DiskFile(self.devices, 'sda', cur_part, 'a', 'c', 'o',
                               FakeLogger())
        mkdirs(df.datadir)
        f = open(
            os.path.join(df.datadir,
                         normalize_timestamp(time.time()) + '.data'), 'wb')
        f.write('1234567890')
        f.close()
        ohash = hash_path('a', 'c', 'o')
        data_dir = ohash[-3:]
        whole_path_from = os.path.join(self.objects, cur_part, data_dir)
        process_arg_checker = []
        nodes = [
            node for node in self.ring.get_part_nodes(int(cur_part))
            if node['ip'] not in _ips()
        ]
        for node in nodes:
            rsync_mod = '%s::object/sda/objects/%s' % (node['ip'], cur_part)
            process_arg_checker.append(
                (0, '', ['rsync', whole_path_from, rsync_mod]))
        with _mock_process(process_arg_checker):
            replicator.run_once()
        self.assertFalse(process_errors)

        object_replicator.http_connect = was_connector
Example #12
    def _get_disk_file(self,
                       invalid_type=None,
                       obj_name='o',
                       fsize=1024,
                       csize=8,
                       mark_deleted=False,
                       ts=None,
                       iter_hook=None,
                       mount_check=False,
                       extra_metadata=None):
        '''returns a DiskFile'''
        df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', obj_name,
                               FakeLogger())
        data = '0' * fsize
        etag = md5()
        if ts:
            timestamp = ts
        else:
            timestamp = str(normalize_timestamp(time()))
        with df.create() as writer:
            writer.write(data)
            etag.update(data)
            etag = etag.hexdigest()
            metadata = {
                'ETag': etag,
                'X-Timestamp': timestamp,
                'Content-Length': str(os.fstat(writer.fd).st_size),
            }
            metadata.update(extra_metadata or {})
            writer.put(metadata)
            if invalid_type == 'ETag':
                etag = md5()
                etag.update('1' + '0' * (fsize - 1))
                etag = etag.hexdigest()
                metadata['ETag'] = etag
                diskfile.write_metadata(writer.fd, metadata)
            if invalid_type == 'Content-Length':
                metadata['Content-Length'] = fsize - 1
                diskfile.write_metadata(writer.fd, metadata)

        if mark_deleted:
            metadata = {'X-Timestamp': timestamp, 'deleted': True}
            df.put_metadata(metadata, tombstone=True)

        df = diskfile.DiskFile(self.testdir,
                               'sda1',
                               '0',
                               'a',
                               'c',
                               obj_name,
                               FakeLogger(),
                               disk_chunk_size=csize,
                               iter_hook=iter_hook,
                               mount_check=mount_check)
        df.open()
        if invalid_type == 'Zero-Byte':
            fp = open(df.data_file, 'w')
            fp.close()
        df.unit_test_len = fsize
        return df
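The invalid_type branches read most easily from the call side; hypothetical invocations of the helper above:

    df = self._get_disk_file()                          # well-formed 1 KiB file
    df = self._get_disk_file(invalid_type='ETag')       # checksum mismatch
    df = self._get_disk_file(invalid_type='Zero-Byte')  # truncated data file
    df = self._get_disk_file(mark_deleted=True)         # tombstoned object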
Example #13
    def test_object_run_fast_track_non_zero(self):
        self.auditor = auditor.ObjectAuditor(self.conf)
        self.auditor.log_time = 0
        data = '0' * 1024
        etag = md5()
        with self.disk_file.create() as writer:
            writer.write(data)
            etag.update(data)
            etag = etag.hexdigest()
            metadata = {
                'ETag': etag,
                'X-Timestamp': str(normalize_timestamp(time.time())),
                'Content-Length': str(os.fstat(writer.fd).st_size),
            }
            writer.put(metadata)
            etag = md5()
            etag.update('1' + '0' * 1023)
            etag = etag.hexdigest()
            metadata['ETag'] = etag
            write_metadata(writer.fd, metadata)

        quarantine_path = os.path.join(self.devices,
                                       'sda', 'quarantined', 'objects')
        self.auditor.run_once(zero_byte_fps=50)
        self.assertFalse(os.path.isdir(quarantine_path))
        self.auditor.run_once()
        self.assertTrue(os.path.isdir(quarantine_path))
Example #14
    def test_object_audit_extra_data(self):
        auditor_worker = auditor.AuditorWorker(self.conf, self.logger)
        data = '0' * 1024
        etag = md5()
        with self.disk_file.create() as writer:
            writer.write(data)
            etag.update(data)
            etag = etag.hexdigest()
            timestamp = str(normalize_timestamp(time.time()))
            metadata = {
                'ETag': etag,
                'X-Timestamp': timestamp,
                'Content-Length': str(os.fstat(writer.fd).st_size),
            }
            writer.put(metadata)
            pre_quarantines = auditor_worker.quarantines

            auditor_worker.object_audit(
                os.path.join(self.disk_file.datadir, timestamp + '.data'),
                'sda', '0')
            self.assertEquals(auditor_worker.quarantines, pre_quarantines)

            os.write(writer.fd, 'extra_data')
            auditor_worker.object_audit(
                os.path.join(self.disk_file.datadir, timestamp + '.data'),
                'sda', '0')
            self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1)
Example #15
def premetadata_create_account_stat_table(self, conn, put_timestamp):
    """
    Copied from AccountBroker before the metadata column was
    added; used for testing with TestAccountBrokerBeforeMetadata.

    Create account_stat table which is specific to the account DB.

    :param conn: DB connection object
    :param put_timestamp: put timestamp
    """
    conn.executescript('''
        CREATE TABLE account_stat (
            account TEXT,
            created_at TEXT,
            put_timestamp TEXT DEFAULT '0',
            delete_timestamp TEXT DEFAULT '0',
            container_count INTEGER,
            object_count INTEGER DEFAULT 0,
            bytes_used INTEGER DEFAULT 0,
            hash TEXT default '00000000000000000000000000000000',
            id TEXT,
            status TEXT DEFAULT '',
            status_changed_at TEXT DEFAULT '0'
        );

        INSERT INTO account_stat (container_count) VALUES (0);
    ''')

    conn.execute('''
        UPDATE account_stat SET account = ?, created_at = ?, id = ?,
               put_timestamp = ?
        ''', (self.account, normalize_timestamp(time()), str(uuid4()),
              put_timestamp))
Example #16
 def tearDown(self):
     AccountBroker.create_account_stat_table = \
         self._imported_create_account_stat_table
     broker = AccountBroker(':memory:', account='a')
     broker.initialize(normalize_timestamp('1'))
     with broker.get() as conn:
         conn.execute('SELECT metadata FROM account_stat')
Example #17
    def generate_request_headers(self, orig_req=None, additional=None,
                                 transfer=False):
        """
        Create a dict of headers to be used in backend requests

        :param orig_req: the original request sent by the client to the proxy
        :param additional: additional headers to send to the backend
        :param transfer: If True, transfer headers from original client request
        :returns: a dictionary of headers
        """
        # Use the additional headers first so they don't overwrite the headers
        # we require.
        headers = HeaderKeyDict(additional) if additional else HeaderKeyDict()
        if transfer:
            self.transfer_headers(orig_req.headers, headers)
        headers.setdefault('x-timestamp', normalize_timestamp(time.time()))
        if orig_req:
            referer = orig_req.as_referer()
        else:
            referer = ''
        headers['x-trans-id'] = self.trans_id
        headers['connection'] = 'close'
        headers['user-agent'] = 'proxy-server %s' % os.getpid()
        headers['referer'] = referer
        return headers
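A hedged usage sketch; `controller` and `client_req` are hypothetical stand-ins for whatever owns this method and the inbound request:

    # Build backend headers from a client request, copying transferable
    # headers and stamping x-timestamp, x-trans-id, referer, etc.
    backend_headers = controller.generate_request_headers(
        orig_req=client_req, additional={'X-Example-Header': '1'},
        transfer=True)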
Example #18
 def test_object_run_once_multi_devices(self):
     auditor_worker = auditor.AuditorWorker(self.conf, self.logger)
     timestamp = str(normalize_timestamp(time.time()))
     pre_quarantines = auditor_worker.quarantines
     data = '0' * 10
     etag = md5()
     with self.disk_file.create() as writer:
         writer.write(data)
         etag.update(data)
         etag = etag.hexdigest()
         metadata = {
             'ETag': etag,
             'X-Timestamp': timestamp,
             'Content-Length': str(os.fstat(writer.fd).st_size),
         }
         writer.put(metadata)
     auditor_worker.audit_all_objects()
     self.disk_file = DiskFile(self.devices, 'sdb', '0', 'a', 'c',
                               'ob', self.logger)
     data = '1' * 10
     etag = md5()
     with self.disk_file.create() as writer:
         writer.write(data)
         etag.update(data)
         etag = etag.hexdigest()
         metadata = {
             'ETag': etag,
             'X-Timestamp': timestamp,
             'Content-Length': str(os.fstat(writer.fd).st_size),
         }
         writer.put(metadata)
         os.write(writer.fd, 'extra_data')
     auditor_worker.audit_all_objects()
     self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1)
Example #19
    def setup_bad_zero_byte(self, with_ts=False):
        self.auditor = auditor.ObjectAuditor(self.conf)
        self.auditor.log_time = 0
        ts_file_path = ''
        if with_ts:
            name_hash = hash_path('a', 'c', 'o')
            dir_path = os.path.join(
                self.devices, 'sda',
                storage_directory(DATADIR, '0', name_hash))
            ts_file_path = os.path.join(dir_path, '99999.ts')
            if not os.path.exists(dir_path):
                mkdirs(dir_path)
            fp = open(ts_file_path, 'w')
            fp.close()

        etag = md5()
        with self.disk_file.create() as writer:
            etag = etag.hexdigest()
            metadata = {
                'ETag': etag,
                'X-Timestamp': str(normalize_timestamp(time.time())),
                'Content-Length': 10,
            }
            writer.put(metadata)
            etag = md5()
            etag = etag.hexdigest()
            metadata['ETag'] = etag
            write_metadata(writer.fd, metadata)
        if self.disk_file.data_file:
            return self.disk_file.data_file
        return ts_file_path
Example #20
 def POST(self, req):
     """Handle HTTP POST request."""
     drive, part, account, container = split_and_validate_path(req, 4)
     if 'x-timestamp' not in req.headers or \
             not check_float(req.headers['x-timestamp']):
         return HTTPBadRequest(body='Missing or bad timestamp',
                               request=req,
                               content_type='text/plain')
     if 'x-container-sync-to' in req.headers:
         err = validate_sync_to(req.headers['x-container-sync-to'],
                                self.allowed_sync_hosts)
         if err:
             return HTTPBadRequest(err)
     if self.mount_check and not check_mount(self.root, drive):
         return HTTPInsufficientStorage(drive=drive, request=req)
     broker = self._get_container_broker(drive, part, account, container)
     if broker.is_deleted():
         return HTTPNotFound(request=req)
     timestamp = normalize_timestamp(req.headers['x-timestamp'])
     metadata = {}
     metadata.update((key, (value, timestamp))
                     for key, value in req.headers.iteritems()
                     if key.lower() in self.save_headers
                     or key.lower().startswith('x-container-meta-'))
     if metadata:
         if 'X-Container-Sync-To' in metadata:
             if 'X-Container-Sync-To' not in broker.metadata or \
                     metadata['X-Container-Sync-To'][0] != \
                     broker.metadata['X-Container-Sync-To'][0]:
                 broker.set_x_container_sync_points(-1, -1)
         broker.update_metadata(metadata)
     return HTTPNoContent(request=req)
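The (key, (value, timestamp)) pairs built here are the broker metadata convention used throughout these examples; a minimal illustration with made-up values:

    metadata = {'X-Container-Meta-Color': ('blue', normalize_timestamp(1))}
    # a newer timestamp wins on merge; clearing is an empty value:
    metadata['X-Container-Meta-Color'] = ('', normalize_timestamp(2))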
Example #21
    def test_hash_suffix_multi_file_two(self):
        df = diskfile.DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o',
                               FakeLogger())
        mkdirs(df.datadir)
        for tdiff in [1, 50, 100, 500]:
            suffs = ['.meta', '.data']
            if tdiff > 50:
                suffs.append('.ts')
            for suff in suffs:
                f = open(
                    os.path.join(
                        df.datadir,
                        normalize_timestamp(int(time()) - tdiff) + suff), 'wb')
                f.write('1234567890')
                f.close()

        ohash = hash_path('a', 'c', 'o')
        data_dir = ohash[-3:]
        whole_path_from = os.path.join(self.objects, '0', data_dir)
        hsh_path = os.listdir(whole_path_from)[0]
        whole_hsh_path = os.path.join(whole_path_from, hsh_path)

        diskfile.hash_suffix(whole_path_from, 99)
        # only the meta and data should be left
        self.assertEquals(len(os.listdir(whole_hsh_path)), 2)
Example #22
 def test_delete_partition(self):
     with mock.patch('swift.obj.replicator.http_connect',
                     mock_http_connect(200)):
         df = diskfile.DiskFile(self.devices,
                                'sda', '1', 'a', 'c', 'o', FakeLogger())
         mkdirs(df.datadir)
         print df.datadir
         f = open(os.path.join(df.datadir,
                               normalize_timestamp(time.time()) + '.data'),
                  'wb')
         f.write('1234567890')
         f.close()
         ohash = hash_path('a', 'c', 'o')
         data_dir = ohash[-3:]
         whole_path_from = os.path.join(self.objects, '1', data_dir)
         part_path = os.path.join(self.objects, '1')
         self.assertTrue(os.access(part_path, os.F_OK))
         nodes = [node for node in
                  self.ring.get_part_nodes(1)
                  if node['ip'] not in _ips()]
         process_arg_checker = []
         for node in nodes:
             rsync_mod = '%s::object/sda/objects/%s' % (node['ip'], 1)
             process_arg_checker.append(
                 (0, '', ['rsync', whole_path_from, rsync_mod]))
         with _mock_process(process_arg_checker):
             self.replicator.replicate()
         self.assertFalse(os.access(part_path, os.F_OK))
Example #23
 def test_delete_partition_with_handoff_delete_failures(self):
     with mock.patch('swift.obj.replicator.http_connect',
                     mock_http_connect(200)):
         self.replicator.handoff_delete = 2
         df = diskfile.DiskFile(self.devices,
                                'sda', '1', 'a', 'c', 'o', FakeLogger())
         mkdirs(df.datadir)
         print df.datadir
         f = open(os.path.join(df.datadir,
                               normalize_timestamp(time.time()) + '.data'),
                  'wb')
         f.write('1234567890')
         f.close()
         ohash = hash_path('a', 'c', 'o')
         data_dir = ohash[-3:]
         whole_path_from = os.path.join(self.objects, '1', data_dir)
         part_path = os.path.join(self.objects, '1')
         self.assertTrue(os.access(part_path, os.F_OK))
         nodes = [node for node in
                  self.ring.get_part_nodes(1)
                  if node['ip'] not in _ips()]
         process_arg_checker = []
         for i, node in enumerate(nodes):
             rsync_mod = '%s::object/sda/objects/%s' % (node['ip'], 1)
             if i in (0, 1):
                 # force two of the rsync calls to fail
                 ret_code = 1
             else:
                 ret_code = 0
             process_arg_checker.append(
                 (ret_code, '', ['rsync', whole_path_from, rsync_mod]))
         with _mock_process(process_arg_checker):
             self.replicator.replicate()
         # The file should still exist
         self.assertTrue(os.access(part_path, os.F_OK))
Example #24
 def get_info(self):
     now = normalize_timestamp(time.time())
     return {'container_count': 0,
             'object_count': 0,
             'bytes_used': 0,
             'created_at': now,
             'put_timestamp': now}
Example #25
    def create_account_stat_table(self, conn, put_timestamp):
        """
        Create account_stat table which is specific to the account DB.
        Not a part of Pluggable Back-ends, internal to the baseline code.

        :param conn: DB connection object
        :param put_timestamp: put timestamp
        """
        conn.executescript("""
            CREATE TABLE account_stat (
                account TEXT,
                created_at TEXT,
                put_timestamp TEXT DEFAULT '0',
                delete_timestamp TEXT DEFAULT '0',
                container_count INTEGER,
                object_count INTEGER DEFAULT 0,
                bytes_used INTEGER DEFAULT 0,
                hash TEXT default '00000000000000000000000000000000',
                id TEXT,
                status TEXT DEFAULT '',
                status_changed_at TEXT DEFAULT '0',
                metadata TEXT DEFAULT ''
            );

            INSERT INTO account_stat (container_count) VALUES (0);
        """)

        conn.execute('''
            UPDATE account_stat SET account = ?, created_at = ?, id = ?,
                   put_timestamp = ?
            ''', (self.account, normalize_timestamp(time.time()),
                  str(uuid4()), put_timestamp))
Example #26
    def test_run_once_recover_from_timeout(self):
        replicator = object_replicator.ObjectReplicator(
            dict(swift_dir=self.testdir, devices=self.devices,
                 mount_check='false', timeout='300', stats_interval='1'))
        was_connector = object_replicator.http_connect
        was_get_hashes = object_replicator.get_hashes
        was_execute = tpool.execute
        self.get_hash_count = 0
        try:

            def fake_get_hashes(*args, **kwargs):
                self.get_hash_count += 1
                if self.get_hash_count == 3:
                    # raise timeout on last call to get hashes
                    raise Timeout()
                return 2, {'abc': 'def'}

            def fake_exc(tester, *args, **kwargs):
                if 'Error syncing partition' in args[0]:
                    tester.i_failed = True

            self.i_failed = False
            object_replicator.http_connect = mock_http_connect(200)
            object_replicator.get_hashes = fake_get_hashes
            replicator.logger.exception = \
                lambda *args, **kwargs: fake_exc(self, *args, **kwargs)
            # Write some files into '1' and run replicate; they should be
            # moved to the other partitions, and then the node should get
            # deleted.
            cur_part = '1'
            df = diskfile.DiskFile(
                self.devices, 'sda', cur_part, 'a', 'c', 'o', FakeLogger())
            mkdirs(df.datadir)
            f = open(os.path.join(df.datadir,
                                  normalize_timestamp(time.time()) + '.data'),
                     'wb')
            f.write('1234567890')
            f.close()
            ohash = hash_path('a', 'c', 'o')
            data_dir = ohash[-3:]
            whole_path_from = os.path.join(self.objects, cur_part, data_dir)
            process_arg_checker = []
            nodes = [node for node in
                     self.ring.get_part_nodes(int(cur_part))
                     if node['ip'] not in _ips()]
            for node in nodes:
                rsync_mod = '%s::object/sda/objects/%s' % (node['ip'],
                                                           cur_part)
                process_arg_checker.append(
                    (0, '', ['rsync', whole_path_from, rsync_mod]))
            self.assertTrue(os.access(os.path.join(self.objects,
                                                   '1', data_dir, ohash),
                                      os.F_OK))
            with _mock_process(process_arg_checker):
                replicator.run_once()
            self.assertFalse(process_errors)
            self.assertFalse(self.i_failed)
        finally:
            object_replicator.http_connect = was_connector
            object_replicator.get_hashes = was_get_hashes
            tpool.execute = was_execute
Example #27
 def test_delete(self):
     df = self._get_disk_file()
     ts = time()
     df.delete(ts)
     exp_name = "%s.ts" % str(normalize_timestamp(ts))
     dl = os.listdir(df.datadir)
     self.assertEquals(len(dl), 1)
     self.assertTrue(exp_name in set(dl))
Example #43
 def _create_ondisk_file(self, df, data, timestamp, ext=".data"):
     mkdirs(df.datadir)
     timestamp = normalize_timestamp(timestamp)
     data_file = os.path.join(df.datadir, timestamp + ext)
     with open(data_file, "wb") as f:
         f.write(data)
         md = {"X-Timestamp": timestamp}
         setxattr(f.fileno(), diskfile.METADATA_KEY, pickle.dumps(md, diskfile.PICKLE_PROTOCOL))
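A short usage sketch under the same assumptions as the helper above (df is a DiskFile, timestamps are arbitrary): stack several on-disk files with increasing timestamps and let the newest of each kind take precedence.

    now = time()
    self._create_ondisk_file(df, 'stale', now - 1)           # older .data
    self._create_ondisk_file(df, 'fresh', now)               # newer .data should win
    self._create_ondisk_file(df, '', now + 1, ext='.meta')   # newer metadata overlay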
Example #44
 def test_put_metadata_ts(self):
     df = self._get_disk_file()
     ts = time()
     metadata = {'X-Timestamp': ts, 'X-Object-Meta-test': 'data'}
     df.put_metadata(metadata, tombstone=True)
     exp_name = '%s.ts' % str(normalize_timestamp(ts))
     dl = os.listdir(df.datadir)
     self.assertEquals(len(dl), 1)
     self.assertTrue(exp_name in set(dl))
Example #45
 def test_put_metadata_ts(self):
     df = self._get_disk_file()
     ts = time()
     metadata = {"X-Timestamp": ts, "X-Object-Meta-test": "data"}
     df.put_metadata(metadata, tombstone=True)
     exp_name = "%s.ts" % str(normalize_timestamp(ts))
     dl = os.listdir(df.datadir)
     self.assertEquals(len(dl), 1)
     self.assertTrue(exp_name in set(dl))
Example #46
 def _create_ondisk_file(self, df, data, timestamp, ext='.data'):
     mkdirs(df.datadir)
     timestamp = normalize_timestamp(timestamp)
     data_file = os.path.join(df.datadir, timestamp + ext)
     with open(data_file, 'wb') as f:
         f.write(data)
         md = {'X-Timestamp': timestamp}
         setxattr(f.fileno(), diskfile.METADATA_KEY,
                  pickle.dumps(md, diskfile.PICKLE_PROTOCOL))
Example #47
 def get_info(self):
     now = normalize_timestamp(time.time())
     return {
         'container_count': 0,
         'object_count': 0,
         'bytes_used': 0,
         'created_at': now,
         'put_timestamp': now
     }
Example #48
    def create_container_stat_table(self, conn, put_timestamp=None):
        """
        Create the container_stat table which is specific to the container DB.
        Not a part of Pluggable Back-ends, internal to the baseline code.

        :param conn: DB connection object
        :param put_timestamp: put timestamp
        """
        if put_timestamp is None:
            put_timestamp = normalize_timestamp(0)
        conn.executescript("""
            CREATE TABLE container_stat (
                account TEXT,
                container TEXT,
                created_at TEXT,
                put_timestamp TEXT DEFAULT '0',
                delete_timestamp TEXT DEFAULT '0',
                object_count INTEGER,
                bytes_used INTEGER,
                reported_put_timestamp TEXT DEFAULT '0',
                reported_delete_timestamp TEXT DEFAULT '0',
                reported_object_count INTEGER DEFAULT 0,
                reported_bytes_used INTEGER DEFAULT 0,
                hash TEXT default '00000000000000000000000000000000',
                id TEXT,
                status TEXT DEFAULT '',
                status_changed_at TEXT DEFAULT '0',
                metadata TEXT DEFAULT '',
                x_container_sync_point1 INTEGER DEFAULT -1,
                x_container_sync_point2 INTEGER DEFAULT -1
            );

            INSERT INTO container_stat (object_count, bytes_used)
                VALUES (0, 0);
        """)
        conn.execute('''
            UPDATE container_stat
            SET account = ?, container = ?, created_at = ?, id = ?,
                put_timestamp = ?
        ''', (self.account, self.container, normalize_timestamp(time.time()),
              str(uuid4()), put_timestamp))
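One detail worth calling out: sqlite3's executescript() does not accept bound parameters, which is why the schema plus its seed row go into one script while the parameterized UPDATE runs as a separate execute(). A standalone sketch of the same pattern:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.executescript('''
        CREATE TABLE t (a TEXT DEFAULT '');
        INSERT INTO t (a) VALUES ('');
    ''')
    # Parameter binding only works through execute()/executemany().
    conn.execute('UPDATE t SET a = ?', ('value',))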
Example #49
 def test_merge_items(self):
     broker1 = AccountBroker(':memory:', account='a')
     broker1.initialize(normalize_timestamp('1'))
     broker2 = AccountBroker(':memory:', account='a')
     broker2.initialize(normalize_timestamp('1'))
     broker1.put_container('a', normalize_timestamp(1), 0, 0, 0)
     broker1.put_container('b', normalize_timestamp(2), 0, 0, 0)
     id = broker1.get_info()['id']
     broker2.merge_items(broker1.get_items_since(
         broker2.get_sync(id), 1000), id)
     items = broker2.get_items_since(-1, 1000)
     self.assertEqual(len(items), 2)
     self.assertEqual(['a', 'b'], sorted([rec['name'] for rec in items]))
     broker1.put_container('c', normalize_timestamp(3), 0, 0, 0)
     broker2.merge_items(broker1.get_items_since(
         broker2.get_sync(id), 1000), id)
     items = broker2.get_items_since(-1, 1000)
     self.assertEqual(len(items), 3)
     self.assertEqual(['a', 'b', 'c'],
                      sorted([rec['name'] for rec in items]))
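The replication handshake the test drives, restated as a sketch with the same broker objects (the comments are my reading of these calls, not documented API):

    point = broker2.get_sync(id)                  # highest ROWID already merged
    items = broker1.get_items_since(point, 1000)  # at most 1000 newer rows
    broker2.merge_items(items, id)                # apply rows, advance sync point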
Example #50
 def test_merge_items(self):
     broker1 = AccountBroker(':memory:', account='a')
     broker1.initialize(normalize_timestamp('1'))
     broker2 = AccountBroker(':memory:', account='a')
     broker2.initialize(normalize_timestamp('1'))
     broker1.put_container('a', normalize_timestamp(1), 0, 0, 0)
     broker1.put_container('b', normalize_timestamp(2), 0, 0, 0)
     id = broker1.get_info()['id']
     broker2.merge_items(
         broker1.get_items_since(broker2.get_sync(id), 1000), id)
     items = broker2.get_items_since(-1, 1000)
     self.assertEquals(len(items), 2)
     self.assertEquals(['a', 'b'], sorted([rec['name'] for rec in items]))
     broker1.put_container('c', normalize_timestamp(3), 0, 0, 0)
     broker2.merge_items(
         broker1.get_items_since(broker2.get_sync(id), 1000), id)
     items = broker2.get_items_since(-1, 1000)
     self.assertEquals(len(items), 3)
     self.assertEquals(['a', 'b', 'c'],
                       sorted([rec['name'] for rec in items]))
Example #51
    def create_container_stat_table(self, conn, put_timestamp=None):
        """
        Create the container_stat table which is specific to the container DB.
        Not a part of Pluggable Back-ends, internal to the baseline code.

        :param conn: DB connection object
        :param put_timestamp: put timestamp
        """
        if put_timestamp is None:
            put_timestamp = normalize_timestamp(0)
        conn.executescript("""
            CREATE TABLE container_stat (
                account TEXT,
                container TEXT,
                created_at TEXT,
                put_timestamp TEXT DEFAULT '0',
                delete_timestamp TEXT DEFAULT '0',
                object_count INTEGER,
                bytes_used INTEGER,
                reported_put_timestamp TEXT DEFAULT '0',
                reported_delete_timestamp TEXT DEFAULT '0',
                reported_object_count INTEGER DEFAULT 0,
                reported_bytes_used INTEGER DEFAULT 0,
                hash TEXT default '00000000000000000000000000000000',
                id TEXT,
                status TEXT DEFAULT '',
                status_changed_at TEXT DEFAULT '0',
                metadata TEXT DEFAULT '',
                x_container_sync_point1 INTEGER DEFAULT -1,
                x_container_sync_point2 INTEGER DEFAULT -1
            );

            INSERT INTO container_stat (object_count, bytes_used)
                VALUES (0, 0);
        """)
        conn.execute('''
            UPDATE container_stat
            SET account = ?, container = ?, created_at = ?, id = ?,
                put_timestamp = ?
        ''', (self.account, self.container, normalize_timestamp(time.time()),
              str(uuid4()), put_timestamp))
Example #52
 def test_delete_container(self):
     # Test AccountBroker.delete_container
     broker = AccountBroker(':memory:', account='a')
     broker.initialize(normalize_timestamp('1'))
     broker.put_container('o', normalize_timestamp(time()), 0, 0, 0)
     with broker.get() as conn:
         self.assertEquals(
             conn.execute("SELECT count(*) FROM container "
                          "WHERE deleted = 0").fetchone()[0], 1)
         self.assertEquals(
             conn.execute("SELECT count(*) FROM container "
                          "WHERE deleted = 1").fetchone()[0], 0)
     sleep(.00001)
     broker.put_container('o', 0, normalize_timestamp(time()), 0, 0)
     with broker.get() as conn:
         self.assertEquals(
             conn.execute("SELECT count(*) FROM container "
                          "WHERE deleted = 0").fetchone()[0], 0)
         self.assertEquals(
             conn.execute("SELECT count(*) FROM container "
                          "WHERE deleted = 1").fetchone()[0], 1)
Example #53
    def async_update(self, op, account, container, obj, host, partition,
                     contdevice, headers_out, objdevice):
        """
	发送或者保存一个异步更新
        Sends or saves an async update.

        :param op: operation performed (ex: 'PUT', or 'DELETE') 执行的操作
        :param account: account name for the object  account 名
        :param container: container name for the object  容器名
        :param obj: object name   对象名
        :param host: host that the container is on  container所在主机
        :param partition: partition that the container is on  容器所在partition
        :param contdevice: device name that the container is on 容器所在的device名
        :param headers_out: dictionary of headers to send in the container
                            request                   文件头字典,发送容器请求
        :param objdevice: device name that the object is in  对象所在的设备名
        """
        headers_out['user-agent'] = 'obj-server %s' % os.getpid()
        full_path = '/%s/%s/%s' % (account, container, obj)
        if all([host, partition, contdevice]):
            # If host, partition and contdevice are all set, attempt the
            # container update over HTTP first.
            try:
                with ConnectionTimeout(self.conn_timeout):
                    ip, port = host.rsplit(':', 1)
                    conn = http_connect(ip, port, contdevice, partition, op,
                                        full_path, headers_out)
                with Timeout(self.node_timeout):
                    response = conn.getresponse()
                    response.read()
                    if is_success(response.status):
                        return
                    else:
                        self.logger.error(_(
                            'ERROR Container update failed '
                            '(saving for async update later): %(status)d '
                            'response from %(ip)s:%(port)s/%(dev)s'),
                            {'status': response.status, 'ip': ip, 'port': port,
                             'dev': contdevice})
            except (Exception, Timeout):
                self.logger.exception(_(
                    'ERROR container update failed with '
                    '%(ip)s:%(port)s/%(dev)s (saving for async update later)'),
                    {'ip': ip, 'port': port, 'dev': contdevice})
        async_dir = os.path.join(self.devices, objdevice, ASYNCDIR)
        ohash = hash_path(account, container, obj)
        self.logger.increment('async_pendings')
        self.threadpools[objdevice].run_in_thread(
            write_pickle,
            {'op': op, 'account': account, 'container': container,
             'obj': obj, 'headers': headers_out},
            os.path.join(async_dir, ohash[-3:], ohash + '-' +
                         normalize_timestamp(headers_out['x-timestamp'])),
            os.path.join(self.devices, objdevice, 'tmp'))
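A hedged sketch of the consumer side of the write_pickle call above: the object updater walks async_pending and unpickles each entry before retrying the container request. The directory walking below is illustrative, not Swift's updater code; only the dict shape comes from the method above.

    import cPickle as pickle
    import os

    def iter_async_updates(device_path):
        async_dir = os.path.join(device_path, 'async_pending')
        for suffix in sorted(os.listdir(async_dir)):
            suffix_dir = os.path.join(async_dir, suffix)
            for entry in sorted(os.listdir(suffix_dir)):
                with open(os.path.join(suffix_dir, entry), 'rb') as f:
                    # Same dict shape as written above:
                    # {'op', 'account', 'container', 'obj', 'headers'}
                    yield pickle.load(f)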
Example #54
    def test_get_info(self):
        # Test AccountBroker.get_info
        broker = AccountBroker(':memory:', account='test1')
        broker.initialize(normalize_timestamp('1'))

        info = broker.get_info()
        self.assertEquals(info['account'], 'test1')
        self.assertEquals(info['hash'], '00000000000000000000000000000000')

        info = broker.get_info()
        self.assertEquals(info['container_count'], 0)

        broker.put_container('c1', normalize_timestamp(time()), 0, 0, 0)
        info = broker.get_info()
        self.assertEquals(info['container_count'], 1)

        sleep(.00001)
        broker.put_container('c2', normalize_timestamp(time()), 0, 0, 0)
        info = broker.get_info()
        self.assertEquals(info['container_count'], 2)

        sleep(.00001)
        broker.put_container('c2', normalize_timestamp(time()), 0, 0, 0)
        info = broker.get_info()
        self.assertEquals(info['container_count'], 2)

        sleep(.00001)
        broker.put_container('c1', 0, normalize_timestamp(time()), 0, 0)
        info = broker.get_info()
        self.assertEquals(info['container_count'], 1)

        sleep(.00001)
        broker.put_container('c2', 0, normalize_timestamp(time()), 0, 0)
        info = broker.get_info()
        self.assertEquals(info['container_count'], 0)
Example #55
    def test_hash_cleanup_listdir(self):
        file_list = []

        def mock_listdir(path):
            return list(file_list)

        def mock_unlink(path):
            file_list.remove(os.path.basename(path))

        with unit_mock({'os.listdir': mock_listdir, 'os.unlink': mock_unlink}):
            # purge .data if there's a newer .ts
            file1 = normalize_timestamp(time()) + '.data'
            file2 = normalize_timestamp(time() + 1) + '.ts'
            file_list = [file1, file2]
            self.assertEquals(diskfile.hash_cleanup_listdir('/whatever'),
                              [file2])

            # purge .ts if there's a newer .data
            file1 = normalize_timestamp(time()) + '.ts'
            file2 = normalize_timestamp(time() + 1) + '.data'
            file_list = [file1, file2]
            self.assertEquals(diskfile.hash_cleanup_listdir('/whatever'),
                              [file2])

            # keep .meta and .data if meta newer than data (the older .ts
            # is purged)
            file1 = normalize_timestamp(time()) + '.ts'
            file2 = normalize_timestamp(time() + 1) + '.data'
            file3 = normalize_timestamp(time() + 2) + '.meta'
            file_list = [file1, file2, file3]
            self.assertEquals(diskfile.hash_cleanup_listdir('/whatever'),
                              [file3, file2])

            # keep only latest of multiple .ts files
            file1 = normalize_timestamp(time()) + '.ts'
            file2 = normalize_timestamp(time() + 1) + '.ts'
            file3 = normalize_timestamp(time() + 2) + '.ts'
            file_list = [file1, file2, file3]
            self.assertEquals(diskfile.hash_cleanup_listdir('/whatever'),
                              [file3])
Example #56
    def test_failsafe_object_audit_will_swallow_errors_in_tests(self):
        timestamp = str(normalize_timestamp(time.time()))
        path = os.path.join(self.disk_file.datadir, timestamp + '.data')
        mkdirs(self.disk_file.datadir)
        with open(path, 'w') as f:
            write_metadata(f, {'name': '/a/c/o'})
        auditor_worker = auditor.AuditorWorker(self.conf, self.logger)

        def blowup(*args):
            raise NameError('tpyo')
        with mock.patch('swift.obj.diskfile.DiskFile',
                        blowup):
            auditor_worker.failsafe_object_audit(path, 'sda', '0')
        self.assertEquals(auditor_worker.errors, 1)