def run(self):
    """Drive one shard migration end-to-end: copy, oplog sync, pause at
    the destination, final sync, then deletion of the source data.

    Phase transitions are reported through ``self.manager``.  Any failure
    is recorded as an ``exc_info`` triple on ``self.exception`` and then
    re-raised; thread-local connections are always closed on exit.
    """
    coll = self.collection_name
    key = self.shard_key
    try:
        # Copy phase: bulk-copy the shard's documents to the new location,
        # remembering the oplog position from just before the copy starts.
        self.manager.set_phase('copy')
        api.start_migration(coll, key, self.new_location)
        position = _get_oplog_pos(coll, key)
        _do_copy(coll, key, self.manager)

        # Sync phase: replay oplog entries written while the copy ran.
        self.manager.set_phase('sync')
        sync_started = time.time()
        api.set_shard_to_migration_status(
            coll, key, metadata.ShardStatus.MIGRATING_SYNC)
        position = _sync_from_oplog(coll, key, position)

        # Keep syncing for at least the metadata caching duration so that
        # every client's cached metadata expires and all writers pause at
        # approximately the same time.
        while time.time() < sync_started + api.get_caching_duration():
            time.sleep(0.05)
            position = _sync_from_oplog(coll, key, position)

        # Metadata caching has now stopped for this shard.  Flip to paused-
        # at-destination and give in-flight updates/inserts ~100ms to land.
        # If they take longer than that, the sharding setup needs a rethink.
        api.set_shard_to_migration_status(
            coll, key,
            metadata.ShardStatus.POST_MIGRATION_PAUSED_AT_DESTINATION)
        time.sleep(0.1)

        # One last oplog replay to catch writes performed during the pause.
        _sync_from_oplog(coll, key, position)

        # Delete phase: remove the now-redundant source copy and settle the
        # shard at its new location.
        self.manager.set_phase('delete')
        api.set_shard_to_migration_status(
            coll, key, metadata.ShardStatus.POST_MIGRATION_DELETE)
        _delete_source_data(coll, key, self.manager)
        api.set_shard_at_rest(coll, key, self.new_location, force=True)
        self.manager.set_phase('complete')
    except BaseException:
        # Stash the full traceback so the spawning thread can inspect it.
        self.exception = sys.exc_info()
        raise
    finally:
        close_thread_connections(threading.current_thread())
def run(self):
    """Run the full migration pipeline for one shard, printing progress
    via ``blue()``: copy, oplog sync, pause, final sync, source deletion.

    On any error the ``exc_info`` triple is stashed on ``self.exception``
    before the exception propagates to the caller.
    """
    try:
        blue('* Starting migration')
        api.start_migration(self.collection_name, self.shard_key,
                            self.new_location)

        # Copy phase: record the oplog position first, then bulk-copy.
        blue('* Doing copy')
        checkpoint = _get_oplog_pos(self.collection_name, self.shard_key)
        _do_copy(self.collection_name, self.shard_key, self.insert_throttle)

        # Sync phase: replay everything written during the copy.
        blue('* Initial oplog sync')
        sync_began = time.time()
        api.set_shard_to_migration_status(
            self.collection_name, self.shard_key,
            metadata.ShardStatus.MIGRATING_SYNC)
        checkpoint = _sync_from_oplog(self.collection_name, self.shard_key,
                                      checkpoint)

        # Keep syncing until the metadata caching window has elapsed, so
        # all clients' cached metadata expires and writes pause at
        # approximately the same time.
        while time.time() < sync_began + api.get_caching_duration():
            time.sleep(0.05)
            checkpoint = _sync_from_oplog(self.collection_name,
                                          self.shard_key, checkpoint)

        # Caching has stopped for this shard: pause at the destination and
        # give pending updates/inserts ~100ms to complete.  Longer than
        # that and the sharding setup needs a rethink.
        blue('* Pausing at destination')
        api.set_shard_to_migration_status(
            self.collection_name, self.shard_key,
            metadata.ShardStatus.POST_MIGRATION_PAUSED_AT_DESTINATION)
        time.sleep(0.1)

        # Final replay to catch writes performed during the pause.
        blue('* Syncing oplog once more')
        _sync_from_oplog(self.collection_name, self.shard_key, checkpoint)

        # Delete phase: drop the source copy and settle the shard.
        blue('* Doing deletion')
        api.set_shard_to_migration_status(
            self.collection_name, self.shard_key,
            metadata.ShardStatus.POST_MIGRATION_DELETE)
        _delete_source_data(self.collection_name, self.shard_key,
                            delete_throttle=self.delete_throttle)
        api.set_shard_at_rest(self.collection_name, self.shard_key,
                              self.new_location, force=True)
        blue('* Done')
    except BaseException:
        # Preserve the traceback for whoever joins this thread.
        self.exception = sys.exc_info()
        raise
def run(self):
    """Migrate one shard: copy, oplog sync, destination pause, final
    sync, then source deletion, with ``blue()`` progress output.

    Failures are captured on ``self.exception`` as an ``exc_info`` triple
    and re-raised.
    """
    coll, key = self.collection_name, self.shard_key

    def set_status(status):
        # Every status change in this pipeline targets the same shard.
        api.set_shard_to_migration_status(coll, key, status)

    def sync(pos):
        # Replay oplog entries since `pos`; returns the new position.
        return _sync_from_oplog(coll, key, pos)

    try:
        blue('* Starting migration')
        api.start_migration(coll, key, self.new_location)

        # Copy phase: note the oplog position before the bulk copy begins.
        blue('* Doing copy')
        pos = _get_oplog_pos(coll, key)
        _do_copy(coll, key, self.insert_throttle)

        # Sync phase: catch up on writes made during the copy.
        blue('* Initial oplog sync')
        began = time.time()
        set_status(metadata.ShardStatus.MIGRATING_SYNC)
        pos = sync(pos)

        # Sync for at least the caching duration so that cached metadata
        # expires everywhere and writes pause at roughly the same moment.
        while time.time() < began + api.get_caching_duration():
            time.sleep(0.05)
            pos = sync(pos)

        # All metadata caching for this shard has stopped.  Pause writes
        # at the destination and allow ~100ms for in-flight operations;
        # anything slower suggests the sharding setup needs a rethink.
        blue('* Pausing at destination')
        set_status(metadata.ShardStatus.POST_MIGRATION_PAUSED_AT_DESTINATION)
        time.sleep(0.1)

        # Catch any writes that landed during the pause.
        blue('* Syncing oplog once more')
        sync(pos)

        # Delete phase: the destination is authoritative; drop the source.
        blue('* Doing deletion')
        set_status(metadata.ShardStatus.POST_MIGRATION_DELETE)
        _delete_source_data(coll, key,
                            delete_throttle=self.delete_throttle)
        api.set_shard_at_rest(coll, key, self.new_location, force=True)
        blue('* Done')
    except BaseException:
        # Record the traceback for the parent thread, then propagate.
        self.exception = sys.exc_info()
        raise
def run(self):
    """Drive one shard migration end-to-end: copy, oplog sync, pause at
    the destination, final sync, then deletion of the source data.

    Phase transitions are reported through ``self.manager``.  Any failure
    is recorded as an ``exc_info`` triple on ``self.exception`` and then
    re-raised.  Thread-local connections are always closed on exit.
    """
    try:
        # Copy phase: bulk-copy the shard's documents to the new location,
        # remembering the oplog position from just before the copy starts.
        self.manager.set_phase('copy')
        api.start_migration(
            self.collection_name, self.shard_key, self.new_location)
        oplog_pos = _get_oplog_pos(self.collection_name, self.shard_key)
        _do_copy(self.collection_name, self.shard_key, self.manager)

        # Sync phase: replay oplog entries written while the copy ran.
        self.manager.set_phase('sync')
        start_sync_time = time.time()
        api.set_shard_to_migration_status(
            self.collection_name, self.shard_key,
            metadata.ShardStatus.MIGRATING_SYNC)
        oplog_pos = _sync_from_oplog(
            self.collection_name, self.shard_key, oplog_pos)

        # Ensure that the sync has taken at least as long as our caching
        # time to ensure that all writes will get paused at approximately
        # the same time.
        while time.time() < start_sync_time + api.get_caching_duration():
            time.sleep(0.05)
            oplog_pos = _sync_from_oplog(
                self.collection_name, self.shard_key, oplog_pos)

        # Now all the caching of metadata should be stopped for this shard.
        # We can flip to being paused at destination and wait ~100ms for any
        # pending updates/inserts to be performed. If these are taking
        # longer than 100ms then you are in a bad place and should rethink
        # sharding.
        api.set_shard_to_migration_status(
            self.collection_name, self.shard_key,
            metadata.ShardStatus.POST_MIGRATION_PAUSED_AT_DESTINATION)
        time.sleep(0.1)

        # Sync the oplog one final time to catch any writes that were
        # performed during the pause.
        _sync_from_oplog(
            self.collection_name, self.shard_key, oplog_pos)

        # Delete phase: remove the now-redundant source copy and settle
        # the shard at its new location.
        self.manager.set_phase('delete')
        api.set_shard_to_migration_status(
            self.collection_name, self.shard_key,
            metadata.ShardStatus.POST_MIGRATION_DELETE)
        _delete_source_data(
            self.collection_name, self.shard_key, self.manager)
        api.set_shard_at_rest(
            self.collection_name, self.shard_key, self.new_location,
            force=True)
        self.manager.set_phase('complete')
    except:
        # Stash the full traceback so the spawning thread can inspect it.
        self.exception = sys.exc_info()
        raise
    finally:
        # BUG FIX: cleanup previously ran only inside the except block, so
        # thread-local connections leaked on every *successful* migration.
        # A finally clause closes them on both paths (matching the sibling
        # variant of this method).
        close_thread_connections(threading.current_thread())