def expected_configs(self, configs):
    """Record the mox expectations for migrating a sequence of chunk configs.

    Expectations alternate per chunk: odd-numbered chunks open a fresh redis
    connection (and record the migration-creation calls), even-numbered chunks
    record the matching __exit__ on the previous connection.  Every chunk,
    regardless of parity, records a Table insert and a queue call.
    NOTE(review): structure reconstructed from a collapsed source line —
    confirm the post-if/else statements run once per chunk, not only on the
    __exit__ branch.
    """
    new_conn = True
    for chunk_config in configs:
        if new_conn:
            # Fresh redis connection: entering the context manager yields
            # the connection mock itself.
            redis_conn = self.mox.CreateMockAnything()
            db.redis_conn().AndReturn(redis_conn)
            redis_conn.__enter__().AndReturn(redis_conn)
            # No prior migration exists for this shard/partition combination.
            orm.Migration.get_latest(
                redis_conn,
                source_shard=chunk_config.source_shard,
                destination_shard=chunk_config.destination_shard,
                partition_val=chunk_config.partition_val
            ).AndReturn(None)
            # A new Migration record is created and inserted.
            migration = self.mox.CreateMockAnything()
            migration._redis = redis_conn
            orm.Migration(redis_conn).AndReturn(migration)
            # Source-shard connection is opened read-only to timestamp the run.
            conn = self.mox.CreateMockAnything()
            db.shard_connection(chunk_config.source_shard, read=True).AndReturn(context(conn))
            conn.get_current_timestamp().AndReturn(123)
            migration.insert()
        else:
            # Close out the redis connection opened on the previous chunk.
            redis_conn.__exit__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
        new_conn = not new_conn
        # Every chunk records a Table insert plus the actual queueing call.
        tt = self.mox.CreateMockAnything()
        orm.Table(redis_conn).AndReturn(tt)
        tt.insert()
        queuer.queue_migrate_table(chunk_config)
def migration_task(self, chunk_config):
    """Run one migration chunk inside a redis connection context.

    Instantiates ``cls`` with the chunk config and an open redis connection
    and runs it.  Unrecoverable errors (including NotImplementedError) are
    logged with their traceback and re-raised so the task is NOT requeued.

    Raises:
        shinkansen.UnrecoverableError, NotImplementedError: re-raised after
            logging.
    """
    try:
        with db.redis_conn() as redis_conn:
            cls(chunk_config, redis_conn).run()
    except (shinkansen.UnrecoverableError, NotImplementedError):
        # The bound exception variable was unused — log.exception already
        # records the active traceback.  Dropping the Py2-only
        # ``except ..., exc:`` binding also keeps this clause valid syntax
        # under both Python 2 and 3.
        log.exception('UnrecoverableError running task %s, NOT requeueing %r',
                      cls.__name__, chunk_config)
        raise
def get(self, source, destination, partition_val):
    """Fetch the latest migration for a (source, destination, partition).

    Returns a 404 JSON response when no migration exists; otherwise
    delegates to the single-migration resource by its migration_id.
    """
    with db.redis_conn() as redis:
        latest = orm.Migration.get_latest(
            redis,
            source_shard=source,
            destination_shard=destination,
            partition_val=partition_val,
        )
        if latest is None:
            # Nothing recorded for this combination yet.
            response = jsonify({'error': 'No migrations found'})
            response.status_code = 404
            return response
        return Migration().get(migration_id=latest.migration_id)
def apply_patches(db_patches_key=None, directory=None, module_base=None):
    """Apply pending database patches while holding a redis lock.

    Args:
        db_patches_key: redis key tracking applied patches; defaults to
            DB_PATCHES_KEY.
        directory: directory to scan for patch modules; defaults to this
            module's directory.
        module_base: dotted base package for patch modules; defaults to
            "shinkansen.orm.patch".
    """
    # Resolve each optional argument to its module-level default.
    directory = os.path.dirname(__file__) if directory is None else directory
    db_patches_key = DB_PATCHES_KEY if db_patches_key is None else db_patches_key
    module_base = "shinkansen.orm.patch" if module_base is None else module_base
    with db.redis_conn() as redis:
        # Serialize patch application across processes via a redis lock.
        with orm.get_lock(db_patches_key + ".LOCK", redis):
            _apply_patches(redis, db_patches_key, directory, module_base)
def get(self, source, destination):
    """List the distinct partition values that have recorded migrations.

    Each entry carries an href pointing at the per-partition resource for
    the given source/destination shard pair.
    """
    with db.redis_conn() as redis:
        # Distinct partition values across all migration records.
        distinct_vals = {
            row[0]
            for row in orm.Migration.get_columns(redis, columns=['partition_val'])
        }
        entries = []
        for pv in distinct_vals:
            entries.append({
                'id': pv,
                'href': '/v5/source/%s/destination/%s/partition/%s' % (source, destination, pv),
                'source': source,
                'destination': destination,
            })
        return {'partitions': entries}
def get(self, migration_id):
    """Return the full status of one migration, or a 404 if unknown.

    The response combines the Migration record's own fields with the
    per-table migration status for the same id.
    """
    with db.redis_conn() as redis_conn:
        record = orm.Migration.get(redis_conn, migration_id=migration_id)
        if record is None:
            # Unknown id: surface a JSON 404 instead of an empty body.
            response = jsonify({'error': 'Migration %r not found' % (migration_id,)})
            response.status_code = 404
            return response
        per_table = shinkansen.status.get_table_migration_status(migration_id=migration_id)
        return {
            'migration_id': migration_id,
            'type': record.type,
            'source': record.source_shard,
            'destination': record.destination_shard,
            'namespace': record.namespace,
            'partition_val': record.partition_val,
            'start_time': record.start_time,
            'end_time': record.end_time,
            'tables': per_table,
        }
def get(self):
    """Report chunks currently in flight, newest cutoff first.

    Query args:
        cutoff: only include chunks queued after this timestamp (ms);
            defaults to 14 days ago.
        full: when true, emit every chunk column instead of the summary set.

    Returns a list of per-chunk dicts sorted by (partition_val,
    source_shard, table_name, chunk_num); date columns are rendered as
    human-readable strings.
    """
    parser = reqparse.RequestParser()
    parser.add_argument('cutoff', type=int, help='cutoff timestamp in ms')
    parser.add_argument('full', type=strbool, help='show all columns')
    args = parser.parse_args()
    cutoff = args['cutoff']
    if cutoff is None:
        # Default window: the last two weeks, expressed in milliseconds.
        cutoff = (time.time() - 14 * 24 * 60 * 60) * 1000
    in_flight = []
    with db.redis_conn() as red:
        for state in ('migrating', 'converting', 'importing', 'exporting'):
            in_flight.extend(orm.Chunk.get_by_index(red, status=state))
    recent = [chunk for chunk in in_flight if chunk.queued_time > cutoff]
    if not recent:
        return []
    if args['full']:
        cols = recent[0]._cols()
    else:
        cols = [
            'queued_time', 'partition_val', 'source_shard', 'table_name',
            'chunk_num', 'num_records_exported', 'num_records_converted',
            'num_records_imported', 'status'
        ]
    date_cols = ('queued_time', 'start_time', 'end_time')
    rows = []
    for chunk in recent:
        row = {}
        for col in cols:
            value = getattr(chunk, col)
            if col in date_cols and value is not None:
                # Stored in ms; render as a local-time string.
                value = str(datetime.fromtimestamp(value // 1000))
            row[col] = value
        rows.append(row)
    rows.sort(key=lambda r: (r['partition_val'], r['source_shard'],
                             r['table_name'], r['chunk_num']))
    return rows
def get_migrations_dict(source, destination, partition_val, migration_type=None):
    """Summarize every migration matching the shard pair and partition.

    Args:
        source: source shard name.
        destination: destination shard name.
        partition_val: partition value the migrations cover.
        migration_type: optional filter narrowing results by migration type.

    Returns a dict echoing the query plus a 'migrations' list of per-id
    status dicts.
    """
    filters = {
        'source_shard': source,
        'destination_shard': destination,
        'partition_val': partition_val,
    }
    if migration_type is not None:
        filters['type'] = migration_type
    with db.redis_conn() as redis_conn:
        matches = orm.Migration.get_by_index(redis_conn, **filters)
        return {
            'source': source,
            'destination': destination,
            'partition_val': partition_val,
            'migrations': [
                get_migration_status(m.migration_id) for m in matches
            ],
        }
def migrate_partition_shard(*args, **kwargs):
    """Open a redis connection and delegate to _migrate_partition_shard.

    Pure pass-through wrapper: all positional and keyword arguments are
    forwarded unchanged after the connection argument.
    """
    with db.redis_conn() as redis_conn:
        result = _migrate_partition_shard(redis_conn, *args, **kwargs)
        return result
def get_migration_status(migration_id):
    """Return the status dict for migration_id, or None if it doesn't exist."""
    with db.redis_conn() as redis_conn:
        record = orm.Migration.get(redis_conn, migration_id=migration_id)
        if record is not None:
            return _get_migration_status(record)
    # Unknown id: mirror the lookup's miss with None.
    return None