Example 1
    def test_multishard_count_with_motion(self):
        api.set_shard_at_rest('dummy', 1, "dest1/test_sharding")
        api.set_shard_at_rest('dummy', 2, "dest1/test_sharding")

        doc1 = {'x': 1, 'y': 1}
        doc2 = {'x': 1, 'y': 2}
        doc3 = {'x': 2, 'y': 1}
        doc4 = {'x': 2, 'y': 2}
        self.db1.dummy.insert(doc1)
        self.db1.dummy.insert(doc2)
        self.db1.dummy.insert(doc3)
        self.db1.dummy.insert(doc4)

        results = operations.multishard_find('dummy', {}).count()
        self.assertEquals(4, results)

        # Mimic the shard having moved to the second location while copies
        # of the documents are still left behind at the source
        api.start_migration('dummy', 2, "dest2/test_sharding")
        api.set_shard_to_migration_status(
            'dummy', 2, api.ShardStatus.POST_MIGRATION_PAUSED_AT_DESTINATION)

        self.db2.dummy.insert(doc3)
        self.db2.dummy.insert(doc4)

        results = operations.multishard_find('dummy', {}).count()
        self.assertEquals(4, results)
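The count stays at 4 because a multishard read consults exactly one location per shard: while a shard is paused at its destination, reads for that shard go to the destination only, so the stale copies left at the source are never double-counted. A minimal sketch of that selection rule, assuming an illustrative metadata shape and status name (not the library's actual internals):

    PAUSED_AT_DESTINATION = 'post-migration-paused-at-destination'

    def readable_location(shard_metadata):
        """Return the single location that reads for this shard should hit."""
        if shard_metadata['status'] == PAUSED_AT_DESTINATION:
            # Post-migration: the destination copy is authoritative, so
            # documents still sitting at the source are ignored.
            return shard_metadata['new_location']
        return shard_metadata['location']

    # Shard 2 mid-move: doc3/doc4 exist at both dest1 and dest2, but only
    # dest2 is consulted, so each document is counted exactly once.
    shard2 = {'status': PAUSED_AT_DESTINATION,
              'location': 'dest1/test_sharding',
              'new_location': 'dest2/test_sharding'}
    assert readable_location(shard2) == 'dest2/test_sharding'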
Example 2
    def run(self):
        try:
            blue('* Starting migration')
            api.start_migration(self.collection_name, self.shard_key,
                                self.new_location)

            # Copy phase
            blue('* Doing copy')
            oplog_pos = _get_oplog_pos(self.collection_name, self.shard_key)
            _do_copy(self.collection_name, self.shard_key,
                     self.insert_throttle)

            # Sync phase
            blue('* Initial oplog sync')
            start_sync_time = time.time()
            api.set_shard_to_migration_status(
                self.collection_name, self.shard_key,
                metadata.ShardStatus.MIGRATING_SYNC)
            oplog_pos = _sync_from_oplog(self.collection_name, self.shard_key,
                                         oplog_pos)

            # Keep syncing until at least the metadata caching duration has
            # elapsed, so that all writers pause at approximately the same
            # time.
            while time.time() < start_sync_time + api.get_caching_duration():
                time.sleep(0.05)
                oplog_pos = _sync_from_oplog(self.collection_name,
                                             self.shard_key, oplog_pos)

            # Now all the caching of metadata should be stopped for this shard.
            # We can flip to being paused at destination and wait ~100ms for any
            # pending updates/inserts to be performed. If these are taking longer
            # than 100ms then you are in a bad place and should rethink sharding.
            blue('* Pausing at destination')
            api.set_shard_to_migration_status(
                self.collection_name, self.shard_key,
                metadata.ShardStatus.POST_MIGRATION_PAUSED_AT_DESTINATION)
            time.sleep(0.1)

            blue('* Syncing oplog once more')
            _sync_from_oplog(self.collection_name, self.shard_key, oplog_pos)

            # Delete phase
            blue('* Doing deletion')
            api.set_shard_to_migration_status(
                self.collection_name, self.shard_key,
                metadata.ShardStatus.POST_MIGRATION_DELETE)
            _delete_source_data(self.collection_name,
                                self.shard_key,
                                delete_throttle=self.delete_throttle)

            api.set_shard_at_rest(self.collection_name,
                                  self.shard_key,
                                  self.new_location,
                                  force=True)

            blue('* Done')
        except:
            self.exception = sys.exc_info()
            raise
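Because run() stashes sys.exc_info() in self.exception before re-raising, whichever thread spawned the migration can join the worker and then check whether it failed. A minimal sketch of that pattern in isolation (the class and function names here are illustrative, not part of the library):

    import sys
    import threading

    class ExceptionSurfacingThread(threading.Thread):
        """Mirrors the pattern above: stash sys.exc_info() so the spawning
        thread can inspect failures after join()."""

        def __init__(self, target_fn):
            super(ExceptionSurfacingThread, self).__init__()
            self.target_fn = target_fn
            self.exception = None

        def run(self):
            try:
                self.target_fn()
            except:
                self.exception = sys.exc_info()
                raise

    def flaky_migration():
        raise RuntimeError('copy phase failed')

    worker = ExceptionSurfacingThread(flaky_migration)
    worker.start()
    worker.join()
    if worker.exception is not None:
        exc_type, exc_value, exc_tb = worker.exception
        print('migration failed: %r' % exc_value)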
Example 3
    def test_update(self):
        # Put the same document in multiple locations (mimicking a
        # mid-migration state), then do an update and ensure that only the
        # correct place has been updated.
        api.set_shard_at_rest('dummy', 1, "dest1/test_sharding")
        doc1 = {'x': 1, 'y': 1}
        self.db1.dummy.insert(doc1)

        api.start_migration('dummy', 1, 'dest2/test_sharding')
        api.set_shard_to_migration_status('dummy', 1,
                                          api.ShardStatus.MIGRATING_COPY)
        self.db2.dummy.insert(doc1)

        result = operations.multishard_update('dummy', {}, {'$inc': {'y': 1}})
        self.assertEquals(1, result['n'])

        # Query the correct shard first and see that the counter has been
        # incremented
        result, = operations.multishard_find('dummy', {'x': 1})
        self.assertEquals(2, result['y'])

        # Now spoof the metadata so that the system thinks the data lives
        # on the second server. The counter there should still be 1.
        api.set_shard_at_rest('dummy', 1, "dest2/test_sharding", force=True)
        result, = operations.multishard_find('dummy', {'x': 1})
        self.assertEquals(1, result['y'])
Example 4
    def test_sync_uses_correct_connection(self):
        """This tests for a bug found during a rollout. The connection for the
        metadata was assumed to be the same connection as the source data was
        going to be coming from. This is *not* always the case.
        """
        # To test this a migration from new to old will expose the bug
        api.set_shard_at_rest('dummy', 1, "dest2/test_sharding")
        api.start_migration('dummy', 1, "dest1/test_sharding")

        # Mimic the state the shard would be in after a document was copied
        # from one location to another
        doc1 = {'x': 1, 'y': 1}
        doc1['_id'] = self.db1.dummy.insert(doc1)
        self.db2.dummy.insert(doc1)

        # Get the initial oplog position, do an update and then sync from the
        # initial position
        initial_oplog_pos = sharder._get_oplog_pos('dummy', 1)
        self.db2.dummy.update({'x': 1}, {'$inc': {'y': 1}})
        api.set_shard_to_migration_status(
            'dummy', 1, api.ShardStatus.MIGRATING_SYNC)
        sharder._sync_from_oplog('dummy', 1, initial_oplog_pos)

        # The data on the first database should now reflect the update that
        # went through
        doc2, = self.db1.dummy.find({})
        self.assertEquals(2, doc2['y'])
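The fix this test pins down is that the oplog/source connection must be resolved from the shard's own location metadata rather than by reusing the client that happens to hold the metadata. A minimal sketch of that resolution, assuming a 'cluster/database' location string and a hypothetical connection cache:

    # Hypothetical sketch: resolve the source connection from the shard's
    # location string, never from the metadata store's own client.
    def source_connection(shard_metadata, connection_cache):
        # 'dest2/test_sharding' -> cluster 'dest2', database 'test_sharding'
        cluster, _, database = shard_metadata['location'].partition('/')
        return connection_cache[cluster][database]

    connection_cache = {
        'dest1': {'test_sharding': 'client-for-dest1'},
        'dest2': {'test_sharding': 'client-for-dest2'},
    }
    # A migration *from* dest2 must tail the oplog on dest2, even when the
    # metadata itself lives on dest1.
    shard = {'location': 'dest2/test_sharding'}
    assert source_connection(shard, connection_cache) == 'client-for-dest2'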
Example 5
    def run(self):
        try:
            # Copy phase
            self.manager.set_phase('copy')
            api.start_migration(self.collection_name, self.shard_key,
                                self.new_location)

            oplog_pos = _get_oplog_pos(self.collection_name, self.shard_key)
            _do_copy(self.collection_name, self.shard_key, self.manager)

            # Sync phase
            self.manager.set_phase('sync')
            start_sync_time = time.time()
            api.set_shard_to_migration_status(
                self.collection_name, self.shard_key,
                metadata.ShardStatus.MIGRATING_SYNC)
            oplog_pos = _sync_from_oplog(self.collection_name, self.shard_key,
                                         oplog_pos)

            # Keep syncing until at least the metadata caching duration has
            # elapsed, so that all writers pause at approximately the same
            # time.
            while time.time() < start_sync_time + api.get_caching_duration():
                time.sleep(0.05)
                oplog_pos = _sync_from_oplog(self.collection_name,
                                             self.shard_key, oplog_pos)

            # Now all the caching of metadata should be stopped for this shard.
            # We can flip to being paused at destination and wait ~100ms for any
            # pending updates/inserts to be performed. If these are taking
            # longer than 100ms then you are in a bad place and should rethink
            # sharding.
            api.set_shard_to_migration_status(
                self.collection_name, self.shard_key,
                metadata.ShardStatus.POST_MIGRATION_PAUSED_AT_DESTINATION)
            time.sleep(0.1)

            # Sync the oplog one final time to catch any writes that were
            # performed during the pause
            _sync_from_oplog(self.collection_name, self.shard_key, oplog_pos)

            # Delete phase
            self.manager.set_phase('delete')
            api.set_shard_to_migration_status(
                self.collection_name, self.shard_key,
                metadata.ShardStatus.POST_MIGRATION_DELETE)
            _delete_source_data(self.collection_name, self.shard_key,
                                self.manager)

            api.set_shard_at_rest(self.collection_name,
                                  self.shard_key,
                                  self.new_location,
                                  force=True)

            self.manager.set_phase('complete')
        except:
            self.exception = sys.exc_info()
            raise
        finally:
            close_thread_connections(threading.current_thread())
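This variant reports progress through a manager object instead of printing. From the set_phase() calls here and the Mock(insert_throttle=None) used in the copy test further down, the manager needs at least set_phase() plus the throttle attributes; a minimal stand-in under those assumptions (anything beyond what the call sites show is a guess):

    class MigrationManager(object):
        """Minimal stand-in for the manager driven above."""

        def __init__(self, insert_throttle=None, delete_throttle=None):
            self.insert_throttle = insert_throttle
            self.delete_throttle = delete_throttle
            self.phase = None

        def set_phase(self, phase):
            # Phases observed above: copy -> sync -> delete -> complete
            self.phase = phase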
Example 6
    def test_update(self):
        # Put the same document in multiple locations (mimicking a
        # mid-migration state), then do an update and ensure that only the
        # correct place has been updated.
        api.set_shard_at_rest('dummy', 1, "dest1/test_sharding")
        doc1 = {'x': 1, 'y': 1}
        self.db1.dummy.insert(doc1)

        api.start_migration('dummy', 1, 'dest2/test_sharding')
        api.set_shard_to_migration_status(
            'dummy', 1, api.ShardStatus.MIGRATING_COPY)
        self.db2.dummy.insert(doc1)

        result = operations.multishard_update('dummy', {}, {'$inc': {'y': 1}})
        self.assertEquals(1, result['n'])

        # Query the correct shard first and see that the counter has been
        # incremented
        result, = operations.multishard_find('dummy', {'x': 1})
        self.assertEquals(2, result['y'])

        # Now spoof the metadata so that the system thinks the data lives
        # on the second server. The counter there should still be 1.
        api.set_shard_at_rest('dummy', 1, "dest2/test_sharding", force=True)
        result, = operations.multishard_find('dummy', {'x': 1})
        self.assertEquals(1, result['y'])
Example 7
    def test_sync_uses_correct_connection(self):
        """This tests for a bug found during a rollout. The connection for the
        metadata was assumed to be the same connection as the source data was
        going to be coming from. This is *not* always the case.
        """
        # To test this a migration from new to old will expose the bug
        api.set_shard_at_rest('dummy', 1, "dest2/test_sharding")
        api.start_migration('dummy', 1, "dest1/test_sharding")

        # Mimic the state the shard would be in after a document was copied
        # from one location to another
        doc1 = {'x': 1, 'y': 1}
        doc1['_id'] = self.db1.dummy.insert(doc1)
        self.db2.dummy.insert(doc1)

        # Get the initial oplog position, do an update and then sync from the
        # initial position
        initial_oplog_pos = sharder._get_oplog_pos('dummy', 1)
        self.db2.dummy.update({'x': 1}, {'$inc': {'y': 1}})
        api.set_shard_to_migration_status('dummy', 1,
                                          api.ShardStatus.MIGRATING_SYNC)
        sharder._sync_from_oplog('dummy', 1, initial_oplog_pos)

        # The data on the first database should now reflect the update that
        # went through
        doc2, = self.db1.dummy.find({})
        self.assertEquals(2, doc2['y'])
Example 8
    def run(self):
        try:
            blue('* Starting migration')
            api.start_migration(
                self.collection_name, self.shard_key, self.new_location)

            # Copy phase
            blue('* Doing copy')
            oplog_pos = _get_oplog_pos(self.collection_name, self.shard_key)
            _do_copy(self.collection_name, self.shard_key, self.insert_throttle)

            # Sync phase
            blue('* Initial oplog sync')
            start_sync_time = time.time()
            api.set_shard_to_migration_status(
                self.collection_name, self.shard_key, metadata.ShardStatus.MIGRATING_SYNC)
            oplog_pos = _sync_from_oplog(
                self.collection_name, self.shard_key, oplog_pos)

            # Keep syncing until at least the metadata caching duration has
            # elapsed, so that all writers pause at approximately the same
            # time.
            while time.time() < start_sync_time + api.get_caching_duration():
                time.sleep(0.05)
                oplog_pos = _sync_from_oplog(
                    self.collection_name, self.shard_key, oplog_pos)

            # Now all the caching of metadata should be stopped for this shard.
            # We can flip to being paused at destination and wait ~100ms for any
            # pending updates/inserts to be performed. If these are taking longer
            # than 100ms then you are in a bad place and should rethink sharding.
            blue('* Pausing at destination')
            api.set_shard_to_migration_status(
                self.collection_name, self.shard_key,
                metadata.ShardStatus.POST_MIGRATION_PAUSED_AT_DESTINATION)
            time.sleep(0.1)
            
            blue('* Syncing oplog once more')
            _sync_from_oplog(
                self.collection_name, self.shard_key, oplog_pos)

            # Delete phase
            blue('* Doing deletion')
            api.set_shard_to_migration_status(
                self.collection_name, self.shard_key,
                metadata.ShardStatus.POST_MIGRATION_DELETE)
            _delete_source_data(
                self.collection_name, self.shard_key,
                delete_throttle=self.delete_throttle)

            api.set_shard_at_rest(
                self.collection_name, self.shard_key, self.new_location,
                force=True)

            blue('* Done')
        except:
            self.exception = sys.exc_info()
            raise
Example 9
    def run(self):
        try:
            # Copy phase
            self.manager.set_phase('copy')
            api.start_migration(
                self.collection_name, self.shard_key, self.new_location)

            oplog_pos = _get_oplog_pos(self.collection_name, self.shard_key)
            _do_copy(self.collection_name, self.shard_key, self.manager)

            # Sync phase
            self.manager.set_phase('sync')
            start_sync_time = time.time()
            api.set_shard_to_migration_status(
                self.collection_name, self.shard_key, metadata.ShardStatus.MIGRATING_SYNC)
            oplog_pos = _sync_from_oplog(
                self.collection_name, self.shard_key, oplog_pos)

            # Keep syncing until at least the metadata caching duration has
            # elapsed, so that all writers pause at approximately the same
            # time.
            while time.time() < start_sync_time + api.get_caching_duration():
                time.sleep(0.05)
                oplog_pos = _sync_from_oplog(
                    self.collection_name, self.shard_key, oplog_pos)

            # Now all the caching of metadata should be stopped for this shard.
            # We can flip to being paused at destination and wait ~100ms for any
            # pending updates/inserts to be performed. If these are taking longer
            # than 100ms then you are in a bad place and should rethink sharding.
            api.set_shard_to_migration_status(
                self.collection_name, self.shard_key,
                metadata.ShardStatus.POST_MIGRATION_PAUSED_AT_DESTINATION)
            time.sleep(0.1)
            
            # Sync the oplog one final time to catch any writes that were
            # performed during the pause
            _sync_from_oplog(
                self.collection_name, self.shard_key, oplog_pos)

            # Delete phase
            self.manager.set_phase('delete')
            api.set_shard_to_migration_status(
                self.collection_name, self.shard_key,
                metadata.ShardStatus.POST_MIGRATION_DELETE)
            _delete_source_data(
                self.collection_name, self.shard_key, self.manager)

            api.set_shard_at_rest(
                self.collection_name, self.shard_key, self.new_location,
                force=True)

            self.manager.set_phase('complete')
        except:
            close_thread_connections(threading.current_thread())
            self.exception = sys.exc_info()
            raise
Example 10
    def test_cannot_move_to_same_location(self):
        ensure_realm_exists('some_realm', 'some_field', 'some_collection')

        set_shard_at_rest('some_realm', 1, 'dest1/db')

        with self.assertRaises(Exception) as catcher:
            start_migration('some_realm', 1, 'dest1/db')
        self.assertEquals(catcher.exception.message,
                          'Shard is already at dest1/db')
Example 11
    def test_cannot_move_to_same_location(self):
        ensure_realm_exists(
            'some_realm', 'some_field', 'some_collection', 'dest1/db')

        set_shard_at_rest('some_realm', 1, 'dest1/db')

        with self.assertRaises(Exception) as catcher:
            start_migration('some_realm', 1, 'dest1/db')
        self.assertEquals(
            catcher.exception.message, 'Shard is already at dest1/db')
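Both variants of this test expect start_migration to reject a no-op move up front. A guard along these lines would satisfy them; the injected metadata lookup is purely illustrative, while the error message matches what the tests assert:

    def start_migration(realm, shard_key, new_location, get_location):
        # get_location stands in for the real metadata lookup
        if get_location(realm, shard_key) == new_location:
            raise Exception('Shard is already at %s' % new_location)
        # ... otherwise record the migration and carry on ...

    try:
        start_migration('some_realm', 1, 'dest1/db',
                        get_location=lambda realm, key: 'dest1/db')
    except Exception as e:
        assert str(e) == 'Shard is already at dest1/db'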
Example 12
    def test_basic_copy(self):
        api.set_shard_at_rest('dummy', 1, "dest1/test_sharding")
        doc1 = {'x': 1, 'y': 1}
        doc1['_id'] = self.db1.dummy.insert(doc1)

        api.start_migration('dummy', 1, "dest2/test_sharding")

        sharder._do_copy('dummy', 1)

        # The data should now be on the second database
        doc2, = self.db2.dummy.find({})
        self.assertEquals(doc1, doc2)
Example 13
    def test_basic_copy(self):
        api.set_shard_at_rest("dummy", 1, "dest1/test_sharding")
        doc1 = {"x": 1, "y": 1}
        doc1["_id"] = self.db1.dummy.insert(doc1)

        api.start_migration("dummy", 1, "dest2/test_sharding")

        sharder._do_copy("dummy", 1)

        # The data should now be on the second database
        doc2, = self.db2.dummy.find({})
        self.assertEquals(doc1, doc2)
Example 14
    def test_basic_copy(self):
        api.set_shard_at_rest('dummy', 1, "dest1/test_sharding")
        doc1 = {'x': 1, 'y': 1}
        doc1['_id'] = self.db1.dummy.insert(doc1)

        api.start_migration('dummy', 1, "dest2/test_sharding")

        manager = Mock(insert_throttle=None)
        sharder._do_copy('dummy', 1, manager)

        # The data should now be on the second database
        doc2, = self.db2.dummy.find({})
        self.assertEquals(doc1, doc2)
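All three variants exercise the same copy behaviour: read every document in the shard from the source and write it, _id included, to the destination. A minimal sketch under those assumptions; the collection handles and throttle handling are illustrative, and the old-style insert() call matches the pymongo era these tests use:

    import time

    def do_copy(source_coll, dest_coll, shard_field, shard_key,
                insert_throttle=None):
        # Copy every document in the shard, keeping _id intact so that a
        # later oplog sync can match documents between the two locations.
        for doc in source_coll.find({shard_field: shard_key}):
            dest_coll.insert(doc)
            if insert_throttle is not None:
                time.sleep(insert_throttle)  # pace writes on busy clusters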
Example 15
    def test_delete_after_migration(self):
        api.set_shard_at_rest("dummy", 1, "dest1/test_sharding")
        api.start_migration("dummy", 1, "dest2/test_sharding")

        # Mimic the state the shard would be in after a document was copied
        # from one location to another
        doc1 = {"x": 1, "y": 1}
        doc1["_id"] = self.db1.dummy.insert(doc1)
        self.db2.dummy.insert(doc1)

        api.set_shard_to_migration_status("dummy", 1, api.ShardStatus.POST_MIGRATION_DELETE)
        sharder._delete_source_data("dummy", 1)

        # The data on the first database should now be gone and the data
        # on the second database should be ok.
        self.assertEquals(0, self.db1.dummy.find({}).count())
        doc1_actual, = self.db2.dummy.find({})
        self.assertEquals(doc1, doc1_actual)
Example 16
    def test_delete_after_migration(self):
        api.set_shard_at_rest('dummy', 1, "dest1/test_sharding")
        api.start_migration('dummy', 1, "dest2/test_sharding")

        # Mimic the state the shard would be in after a document was copied
        # from one location to another
        doc1 = {'x': 1, 'y': 1}
        doc1['_id'] = self.db1.dummy.insert(doc1)
        self.db2.dummy.insert(doc1)

        api.set_shard_to_migration_status(
            'dummy', 1, api.ShardStatus.POST_MIGRATION_DELETE)
        sharder._delete_source_data('dummy', 1)

        # The data on the first database should now be gone and the data
        # on the second database should be ok.
        self.assertEquals(0, self.db1.dummy.find({}).count())
        doc1_actual, = self.db2.dummy.find({})
        self.assertEquals(doc1, doc1_actual)
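The delete phase is the mirror image of the copy: it must remove the shard's documents from the source only, leaving the destination copy untouched. A sketch of that expected behaviour, with the same illustrative caveats as the copy sketch above:

    import time

    def delete_source_data(source_coll, shard_field, shard_key,
                           delete_throttle=None):
        # Only the source is touched; the destination keeps the data.
        for doc in source_coll.find({shard_field: shard_key}, {'_id': 1}):
            source_coll.remove({'_id': doc['_id']})
            if delete_throttle is not None:
                time.sleep(delete_throttle)  # pace deletes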
Example 17
    def test_multishard_find_during_post_migration(self):
        # Indicate a migration has started on shard #2 and insert a document
        # with the same ID into both databases, with slightly different data
        # in each location. Then ensure each document comes back from exactly
        # one (the currently authoritative) location.
        api.set_shard_at_rest('dummy', 1, "dest1/test_sharding")
        api.set_shard_at_rest('dummy', 2, "dest1/test_sharding")
        api.start_migration('dummy', 2, "dest2/test_sharding")
        api.set_shard_to_migration_status(
            'dummy', 2, api.ShardStatus.POST_MIGRATION_PAUSED_AT_DESTINATION)
        doc1 = {'x': 1, 'y': 1}
        doc_id = bson.ObjectId()
        doc2_fresh = {'_id': doc_id, 'x': 2, 'y': 1, 'is_fresh': True}
        doc2_stale = {'_id': doc_id, 'x': 2, 'y': 1, 'is_fresh': False}
        self.db1.dummy.insert(doc1)
        self.db1.dummy.insert(doc2_fresh)
        self.db2.dummy.insert(doc2_stale)

        c = operations.multishard_find('dummy', {'y': 1})
        results = sorted(list(c), key=lambda d: d['x'])
        self.assertEquals([doc1, doc2_stale], results)
Example 18
    def test_multishard_find_during_migration(self):
        # Indicate a migration has started on shard #2 and insert a document
        # with the same ID into both databases, with slightly different data
        # in each location. Then ensure each document comes back from exactly
        # one (the currently authoritative) location.
        api.set_shard_at_rest('dummy', 1, "dest1/test_sharding")
        api.set_shard_at_rest('dummy', 2, "dest1/test_sharding")
        # We require a shard at rest on the target server to ensure the location
        # is picked up as a queryable location
        api.set_shard_at_rest('dummy', 3, "dest2/test_sharding")
        api.start_migration('dummy', 2, "dest2/test_sharding")
        doc1 = {'x': 1, 'y': 1}
        doc_id = bson.ObjectId()
        doc2_fresh = {'_id': doc_id, 'x': 2, 'y': 1, 'is_fresh': True}
        doc2_stale = {'_id': doc_id, 'x': 2, 'y': 1, 'is_fresh': False}
        self.db1.dummy.insert(doc1)
        self.db1.dummy.insert(doc2_fresh)
        self.db2.dummy.insert(doc2_stale)

        c = operations.multishard_find('dummy', {'y': 1})
        results = sorted(list(c), key=lambda d: d['x'])
        self.assertEquals([doc1, doc2_fresh], results)
Example 19
    def test_sync_after_copy(self):
        api.set_shard_at_rest("dummy", 1, "dest1/test_sharding")
        api.start_migration("dummy", 1, "dest2/test_sharding")

        # Mimic the state the shard would be in after a document was copied
        # from one location to another
        doc1 = {"x": 1, "y": 1}
        doc1["_id"] = self.db1.dummy.insert(doc1)
        self.db2.dummy.insert(doc1)

        # Get the initial oplog position, do an update and then sync from the
        # initial position
        initial_oplog_pos = sharder._get_oplog_pos()
        self.db1.dummy.update({"x": 1}, {"$inc": {"y": 1}})
        api.set_shard_to_migration_status("dummy", 1, api.ShardStatus.MIGRATING_SYNC)
        sharder._sync_from_oplog("dummy", 1, initial_oplog_pos)

        # The data on the second database should now reflect the update that
        # went through
        doc2, = self.db2.dummy.find({})
        self.assertEquals(2, doc2["y"])
Example 20
    def test_sync_after_copy(self):
        api.set_shard_at_rest('dummy', 1, "dest1/test_sharding")
        api.start_migration('dummy', 1, "dest2/test_sharding")

        # Mimic the state the shard would be in after a document was copied
        # from one location to another
        doc1 = {'x': 1, 'y': 1}
        doc1['_id'] = self.db1.dummy.insert(doc1)
        self.db2.dummy.insert(doc1)

        # Get the initial oplog position, do an update and then sync from the
        # initial position
        initial_oplog_pos = sharder._get_oplog_pos('dummy', 1)
        self.db1.dummy.update({'x': 1}, {'$inc': {'y': 1}})
        api.set_shard_to_migration_status('dummy', 1,
                                          api.ShardStatus.MIGRATING_SYNC)
        sharder._sync_from_oplog('dummy', 1, initial_oplog_pos)

        # The data on the second database should now reflect the update that
        # went through
        doc2, = self.db2.dummy.find({})
        self.assertEquals(2, doc2['y'])
Example 21
    def test_sync_ignores_other_collection(self):
        api.set_shard_at_rest('dummy', 1, "dest1/test_sharding")
        api.start_migration('dummy', 1, "dest2/test_sharding")

        # Mimic the state the shard would be in after a document was copied
        # from one location to another
        doc1 = {'x': 1, 'y': 1}
        doc1['_id'] = self.db1.dummy.insert(doc1)
        self.db2.dummy.insert(doc1)

        # Get the initial oplog position, do an update to a different collection
        # and then sync from the initial position
        initial_oplog_pos = sharder._get_oplog_pos('dummy', 1)
        self.db1.other_coll.insert(doc1)
        self.db1.other_coll.update({'x': 1}, {'$inc': {'y': 1}})
        api.set_shard_to_migration_status(
            'dummy', 1, api.ShardStatus.MIGRATING_SYNC)
        sharder._sync_from_oplog('dummy', 1, initial_oplog_pos)

        # The data on the second database should be in the same state as it
        # was before
        doc2, = self.db2.dummy.find({})
        self.assertEquals(1, doc2['y'])
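This final test pins down the filter _sync_from_oplog must apply: oplog entries carry a namespace, and entries belonging to any other collection are skipped. A minimal sketch of that filtering; the entry fields ('ns', 'op', 'o', 'o2') follow the standard MongoDB oplog format, while the apply step itself is illustrative:

    def sync_from_oplog(entries, namespace, dest_coll):
        for entry in entries:
            if entry['ns'] != namespace:
                continue  # e.g. the writes to other_coll above are skipped
            if entry['op'] == 'i':            # insert
                dest_coll.insert(entry['o'])
            elif entry['op'] == 'u':          # update
                dest_coll.update(entry['o2'], entry['o'])
            elif entry['op'] == 'd':          # delete
                dest_coll.remove(entry['o'])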