def test_post_replicate_hook(self):
    """update_reconciler_sync fires once after replication and is skipped
    when the replication info has not changed."""
    # next(ts) instead of ts.next(): the builtin works on py2 and py3
    ts = (Timestamp(t).internal for t in itertools.count(int(time.time())))
    broker = self._get_broker('a', 'c', node_index=0)
    broker.initialize(next(ts), 0)
    broker.put_object('foo',
                      next(ts),
                      0,
                      'text/plain',
                      'xyz',
                      deleted=0,
                      storage_policy_index=0)
    info = broker.get_replication_info()
    self.assertEqual(1, info['max_row'])
    self.assertEqual(-1, broker.get_reconciler_sync())
    daemon = replicator.ContainerReplicator({})
    calls = []
    with self._wrap_update_reconciler_sync(broker, calls):
        daemon._post_replicate_hook(broker, info, [])
    self.assertEqual(1, len(calls))
    # repeated call to _post_replicate_hook with no change to info
    # should not call update_reconciler_sync
    calls = []
    with self._wrap_update_reconciler_sync(broker, calls):
        daemon._post_replicate_hook(broker, info, [])
    self.assertEqual(0, len(calls))
 def test_sync_status_change(self):
     """Deleting the local container then replicating propagates deleted
     status to the remote with a newer status_changed_at."""
     # setup a local container
     broker = self._get_broker('a', 'c', node_index=0)
     put_timestamp = time.time()
     broker.initialize(put_timestamp, POLICIES.default.idx)
     # setup remote container
     remote_broker = self._get_broker('a', 'c', node_index=1)
     remote_broker.initialize(put_timestamp, POLICIES.default.idx)
     # delete local container
     broker.delete_db(time.time())
     # replicate
     daemon = replicator.ContainerReplicator({})
     part, node = self._get_broker_part_node(remote_broker)
     info = broker.get_replication_info()
     success = daemon._repl_to_node(node, broker, part, info)
     # nothing to do
     self.assertTrue(success)
     self.assertEqual(1, daemon.stats['no_change'])
     # status in sync
     self.assertTrue(remote_broker.is_deleted())
     info = broker.get_info()
     remote_info = remote_broker.get_info()
     # assertTrue replaces the deprecated unittest assert_ alias
     self.assertTrue(
         Timestamp(remote_info['status_changed_at']) > Timestamp(
             remote_info['put_timestamp']),
         'remote status_changed_at (%s) is not '
         'greater than put_timestamp (%s)' %
         (remote_info['status_changed_at'], remote_info['put_timestamp']))
     self.assertTrue(
         Timestamp(remote_info['status_changed_at']) > Timestamp(
             info['status_changed_at']),
         'remote status_changed_at (%s) is not '
         'greater than local status_changed_at (%s)' %
         (remote_info['status_changed_at'], info['status_changed_at']))
# (scrape artifact removed: "Esempio n. 3" / score "0" — Italian sample-site
# example marker, not part of the original test module)
 def test_sync_remote_with_timings(self):
     """RPC timing debug lines are emitted for every sync step when
     DEBUG_TIMINGS_THRESHOLD is forced to zero."""
     # setup a local container
     broker = self._get_broker('a', 'c', node_index=0)
     put_timestamp = time.time()
     broker.initialize(put_timestamp, POLICIES.default.idx)
     broker.update_metadata(
         {'x-container-meta-test': ('foo', put_timestamp)})
     # setup remote container
     remote_broker = self._get_broker('a', 'c', node_index=1)
     remote_broker.initialize(time.time(), POLICIES.default.idx)
     timestamp = time.time()
     for db in (broker, remote_broker):
         db.put_object('/a/c/o', timestamp, 0, 'content-type', 'etag',
                       storage_policy_index=db.storage_policy_index)
     # replicate
     daemon = replicator.ContainerReplicator({})
     part, node = self._get_broker_part_node(remote_broker)
     info = broker.get_replication_info()
     with mock.patch.object(db_replicator, 'DEBUG_TIMINGS_THRESHOLD', 0):
         success = daemon._repl_to_node(node, broker, part, info)
     # nothing to do
     self.assertTrue(success)
     self.assertEqual(1, daemon.stats['no_change'])
     expected_timings = ('info', 'update_metadata', 'merge_timestamps',
                         'get_sync', 'merge_syncs')
     debug_lines = self.rpc.logger.logger.get_lines_for_level('debug')
     self.assertEqual(len(expected_timings), len(debug_lines))
     for metric in expected_timings:
         expected = 'replicator-rpc-sync time for %s:' % metric
         # assertTrue replaces the deprecated unittest assert_ alias
         self.assertTrue(any(expected in line for line in debug_lines),
                         'debug timing %r was not in %r' % (
                             expected, debug_lines))
# (scrape artifact removed: "Esempio n. 4" / score "0" — example-site marker)
 def test_report_up_to_date_with_synthetic_info(self):
     """report_up_to_date flips with each stat vs. its reported twin.

     Renamed from ``test_report_up_to_date``: another method with that
     exact name is defined later in this class, so this earlier one was
     silently shadowed and never collected by the test loader.
     """
     repl = replicator.ContainerReplicator({})
     # hand-built info dict: every stat matches its reported counterpart
     info = {'put_timestamp': Timestamp(1).internal,
             'delete_timestamp': Timestamp(0).internal,
             'object_count': 0,
             'bytes_used': 0,
             'reported_put_timestamp': Timestamp(1).internal,
             'reported_delete_timestamp': Timestamp(0).internal,
             'reported_object_count': 0,
             'reported_bytes_used': 0}
     self.assertTrue(repl.report_up_to_date(info))
     info['delete_timestamp'] = Timestamp(2).internal
     self.assertFalse(repl.report_up_to_date(info))
     info['reported_delete_timestamp'] = Timestamp(2).internal
     self.assertTrue(repl.report_up_to_date(info))
     info['object_count'] = 1
     self.assertFalse(repl.report_up_to_date(info))
     info['reported_object_count'] = 1
     self.assertTrue(repl.report_up_to_date(info))
     info['bytes_used'] = 1
     self.assertFalse(repl.report_up_to_date(info))
     info['reported_bytes_used'] = 1
     self.assertTrue(repl.report_up_to_date(info))
     info['put_timestamp'] = Timestamp(3).internal
     self.assertFalse(repl.report_up_to_date(info))
     info['reported_put_timestamp'] = Timestamp(3).internal
     self.assertTrue(repl.report_up_to_date(info))
# (scrape artifact removed: "Esempio n. 5" / score "0" — example-site marker)
    def test_sync_remote_can_not_keep_up(self):
        """If new rows keep arriving during merge_items the replicator gives
        up after max_diffs passes; a quiet retry then succeeds."""
        put_timestamp = time.time()
        # create "local" broker
        broker = self._get_broker('a', 'c', node_index=0)
        broker.initialize(put_timestamp, POLICIES.default.idx)
        # create "remote" broker
        remote_broker = self._get_broker('a', 'c', node_index=1)
        remote_broker.initialize(put_timestamp, POLICIES.default.idx)
        # add some rows to both db's
        for i in range(10):
            put_timestamp = time.time()
            for db in (broker, remote_broker):
                obj_name = 'o_%s' % i
                db.put_object(obj_name, put_timestamp, 0,
                              'content-type', 'etag',
                              storage_policy_index=db.storage_policy_index)
        # setup REPLICATE callback to simulate adding rows during merge_items
        missing_counter = itertools.count()

        def put_more_objects(op, *args):
            if op != 'merge_items':
                return
            # next() builtin (py2/py3) instead of py2-only .next()
            path = '/a/c/o_missing_%s' % next(missing_counter)
            # use broker's own policy index; the original read the loop
            # variable ``db`` leaked from the setup loop above
            broker.put_object(path, time.time(), 0, 'content-type', 'etag',
                              storage_policy_index=broker.storage_policy_index)
        test_db_replicator.FakeReplConnection = \
            test_db_replicator.attach_fake_replication_rpc(
                self.rpc, replicate_hook=put_more_objects)
        db_replicator.ReplConnection = test_db_replicator.FakeReplConnection
        # and add one extra to local db to trigger merge_items
        put_more_objects('merge_items')
        # limit number of times we'll call merge_items
        daemon = replicator.ContainerReplicator({'max_diffs': 10})
        # replicate
        part, node = self._get_broker_part_node(remote_broker)
        info = broker.get_replication_info()
        success = daemon._repl_to_node(node, broker, part, info)
        self.assertFalse(success)
        # back off on the PUTs during replication...
        FakeReplConnection = test_db_replicator.attach_fake_replication_rpc(
            self.rpc, replicate_hook=None)
        db_replicator.ReplConnection = FakeReplConnection
        # retry replication
        info = broker.get_replication_info()
        success = daemon._repl_to_node(node, broker, part, info)
        self.assertTrue(success)
        # row merge
        self.assertEqual(2, daemon.stats['diff'])
        self.assertEqual(1, daemon.stats['diff_capped'])
        local_info = self._get_broker(
            'a', 'c', node_index=0).get_info()
        remote_info = self._get_broker(
            'a', 'c', node_index=1).get_info()
        for k, v in local_info.items():
            if k == 'id':
                continue
            self.assertEqual(remote_info[k], v,
                             "mismatch remote %s %r != %r" % (
                                 k, remote_info[k], v))
 def test_sync_remote_in_sync(self):
     """Replicating a broker to its own part/node is a successful no-op."""
     # setup a local container
     broker = self._get_broker('a', 'c', node_index=0)
     put_timestamp = time.time()
     broker.initialize(put_timestamp, POLICIES.default.idx)
     daemon = replicator.ContainerReplicator({})
     # "replicate" to same database; the original also built a hard-coded
     # node dict here that was dead — immediately overwritten below
     part, node = self._get_broker_part_node(broker)
     info = broker.get_replication_info()
     success = daemon._repl_to_node(node, broker, part, info)
     # nothing to do
     self.assertTrue(success)
     self.assertEqual(1, daemon.stats['no_change'])
# (scrape artifact removed: "Esempio n. 7" / score "0" — example-site marker)
    def test_rsync_failure(self):
        """Replication to a different device fails when rsync fails."""
        broker = self._get_broker('a', 'c', node_index=0)
        broker.initialize(time.time(), POLICIES.default.idx)
        # "replicate" to different device
        daemon = replicator.ContainerReplicator({})
        # stub out rsync so the file transfer always reports failure
        daemon._rsync_file = lambda *args, **kwargs: False
        # replicate to a randomly chosen foreign node
        part, local_node = self._get_broker_part_node(broker)
        other_nodes = [n for n in self._ring.devs if n != local_node]
        node = random.choice(other_nodes)
        info = broker.get_replication_info()
        self.assertFalse(daemon._repl_to_node(node, broker, part, info))
 def test_sync_remote_missing_one_rows(self):
     """A single extra local row is replicated via a one-row diff merge."""
     start = time.time()
     # create "local" broker
     broker = self._get_broker('a', 'c', node_index=0)
     broker.initialize(start, POLICIES.default.idx)
     # create "remote" broker
     remote_broker = self._get_broker('a', 'c', node_index=1)
     remote_broker.initialize(start, POLICIES.default.idx)
     # add some rows to both db
     for i in range(10):
         now = time.time()
         for db in (broker, remote_broker):
             db.put_object('/a/c/o_%s' % i, now, 0, 'content-type',
                           'etag',
                           storage_policy_index=db.storage_policy_index)
     # now a row to the "local" broker only
     broker.put_object('/a/c/o_missing', time.time(), 0, 'content-type',
                       'etag',
                       storage_policy_index=broker.storage_policy_index)
     # replicate
     daemon = replicator.ContainerReplicator({})
     part, node = self._get_broker_part_node(remote_broker)
     info = broker.get_replication_info()
     self.assertTrue(daemon._repl_to_node(node, broker, part, info))
     # row merge
     self.assertEqual(1, daemon.stats['diff'])
     # local and remote infos should now agree on everything but 'id'
     local_info = self._get_broker('a', 'c', node_index=0).get_info()
     remote_info = self._get_broker('a', 'c', node_index=1).get_info()
     for key, value in local_info.items():
         if key == 'id':
             continue
         self.assertEqual(
             remote_info[key], value,
             "mismatch remote %s %r != %r" % (key, remote_info[key],
                                              value))
# (scrape artifact removed: "Esempio n. 9" / score "0" — example-site marker)
    def test_sync_merge_timestamps(self):
        """Replication merges put_timestamps in both directions and skips
        merge_timestamps entirely when nothing has changed."""
        # next(ts) instead of ts.next(): the builtin works on py2 and py3
        ts = (Timestamp(t).internal for t in
              itertools.count(int(time.time())))
        # setup a local container
        broker = self._get_broker('a', 'c', node_index=0)
        put_timestamp = next(ts)
        broker.initialize(put_timestamp, POLICIES.default.idx)
        # setup remote container
        remote_broker = self._get_broker('a', 'c', node_index=1)
        remote_put_timestamp = next(ts)
        remote_broker.initialize(remote_put_timestamp, POLICIES.default.idx)
        # replicate, expect call to merge_timestamps on remote and local
        daemon = replicator.ContainerReplicator({})
        part, node = self._get_broker_part_node(remote_broker)
        info = broker.get_replication_info()
        local_calls = []
        remote_calls = []
        # NOTE(review): both wrappers are attached to the local ``broker``;
        # if remote calls are meant to be captured from ``remote_broker``,
        # the inner wrap may target the wrong object — confirm intent
        with self._wrap_merge_timestamps(broker, local_calls):
            with self._wrap_merge_timestamps(broker, remote_calls):
                success = daemon._repl_to_node(node, broker, part, info)
        self.assertTrue(success)
        self.assertEqual(1, len(remote_calls))
        self.assertEqual(1, len(local_calls))
        self.assertEqual(remote_put_timestamp,
                         broker.get_info()['put_timestamp'])
        self.assertEqual(remote_put_timestamp,
                         remote_broker.get_info()['put_timestamp'])

        # replicate again, no changes so expect no calls to merge_timestamps
        info = broker.get_replication_info()
        local_calls = []
        remote_calls = []
        with self._wrap_merge_timestamps(broker, local_calls):
            with self._wrap_merge_timestamps(broker, remote_calls):
                success = daemon._repl_to_node(node, broker, part, info)
        self.assertTrue(success)
        self.assertEqual(0, len(remote_calls))
        self.assertEqual(0, len(local_calls))
        self.assertEqual(remote_put_timestamp,
                         broker.get_info()['put_timestamp'])
        self.assertEqual(remote_put_timestamp,
                         remote_broker.get_info()['put_timestamp'])
    def test_sync_remote_missing_most_rows(self):
        """When the remote is missing most rows the whole local db is
        shipped (remote_merge path) instead of merging row by row."""
        put_timestamp = time.time()
        # create "local" broker
        broker = self._get_broker('a', 'c', node_index=0)
        broker.initialize(put_timestamp, POLICIES.default.idx)
        # create "remote" broker
        remote_broker = self._get_broker('a', 'c', node_index=1)
        remote_broker.initialize(put_timestamp, POLICIES.default.idx)
        # add a row to "local" db
        broker.put_object('/a/c/o',
                          time.time(),
                          0,
                          'content-type',
                          'etag',
                          storage_policy_index=broker.storage_policy_index)
        # replicate (the original built a hard-coded node dict here that
        # was dead — immediately overwritten by _get_broker_part_node)
        daemon = replicator.ContainerReplicator({})

        def _rsync_file(db_file, remote_file, **kwargs):
            # fake rsync: copy the db file straight into the remote path
            _remote_server, remote_path = remote_file.split('/', 1)
            dest_path = os.path.join(self.root, remote_path)
            shutil.copy(db_file, dest_path)
            return True

        daemon._rsync_file = _rsync_file
        part, node = self._get_broker_part_node(remote_broker)
        info = broker.get_replication_info()
        success = daemon._repl_to_node(node, broker, part, info)
        self.assertTrue(success)
        # row merge
        self.assertEqual(1, daemon.stats['remote_merge'])
        local_info = self._get_broker('a', 'c', node_index=0).get_info()
        remote_info = self._get_broker('a', 'c', node_index=1).get_info()
        for k, v in local_info.items():
            if k == 'id':
                continue
            self.assertEqual(
                remote_info[k], v,
                "mismatch remote %s %r != %r" % (k, remote_info[k], v))
 def test_report_up_to_date(self):
     """report_up_to_date is true only while every stat in the replication
     info matches its last-reported counterpart."""
     broker = self._get_broker('a', 'c', node_index=0)
     broker.initialize(Timestamp(1).internal, int(POLICIES.default))
     info = broker.get_info()
     broker.reported(info['put_timestamp'], info['delete_timestamp'],
                     info['object_count'], info['bytes_used'])
     full_info = broker.get_replication_info()
     expected_info = {
         'put_timestamp': Timestamp(1).internal,
         'delete_timestamp': '0',
         'count': 0,
         'bytes_used': 0,
         'reported_put_timestamp': Timestamp(1).internal,
         'reported_delete_timestamp': '0',
         'reported_object_count': 0,
         'reported_bytes_used': 0
     }
     for key, value in expected_info.items():
         self.assertEqual(
             full_info[key], value,
             'expected value for %r, %r != %r' % (key, full_info[key],
                                                  value))
     repl = replicator.ContainerReplicator({})
     self.assertTrue(repl.report_up_to_date(full_info))
     # bump each stat, then its reported twin; up-to-date flips each time
     for stat_key, reported_key, new_value in (
             ('delete_timestamp', 'reported_delete_timestamp',
              Timestamp(2).internal),
             ('count', 'reported_object_count', 1),
             ('bytes_used', 'reported_bytes_used', 1),
             ('put_timestamp', 'reported_put_timestamp',
              Timestamp(3).internal)):
         full_info[stat_key] = new_value
         self.assertFalse(repl.report_up_to_date(full_info))
         full_info[reported_key] = new_value
         self.assertTrue(repl.report_up_to_date(full_info))