def reverse_migration(
    self,
    migration_key: MigrationKey,
    *,
    force: bool = False,
    fake: bool = False,
) -> None:
    """
    Reverses a migration.

    Raises MigrationError if the migration was never run, if a completed
    migration is reversed without force/fake, or if any later migration
    in the group has already been started.
    """
    migration_group, migration_id = migration_key
    group_migrations = get_group_loader(migration_group).get_migrations()
    if migration_id not in group_migrations:
        raise MigrationError("Invalid migration")

    statuses = self._get_migration_status()

    def status_of(key: MigrationKey) -> Status:
        return statuses.get(key, Status.NOT_STARTED)

    current = status_of(migration_key)
    if current == Status.NOT_STARTED:
        raise MigrationError(
            "You cannot reverse a migration that has not been run")
    if current == Status.COMPLETED and not (force or fake):
        raise MigrationError(
            "You must use force to revert an already completed migration")

    # Every later migration in the group must be untouched before this
    # one can be reversed.
    later = group_migrations[group_migrations.index(migration_id) + 1:]
    for later_id in later:
        if status_of(MigrationKey(migration_group, later_id)) != Status.NOT_STARTED:
            raise MigrationError("Subsequent migrations must be reversed first")

    if fake:
        # Only flip the bookkeeping; do not actually run backwards().
        self._update_migration_status(migration_key, Status.NOT_STARTED)
        return

    context = Context(
        migration_id,
        logger,
        partial(self._update_migration_status, migration_key),
    )
    migration = get_group_loader(migration_key.group).load_migration(migration_id)
    migration.backwards(context)
def _get_pending_migrations(self) -> List[MigrationKey]:
    """
    Gets pending migration list.

    Raises MigrationInProgress if any migration is currently running, and
    InvalidMigrationState if a completed migration follows a pending one
    within the same group.
    """
    pending: List[MigrationKey] = []
    statuses = self._get_migration_status()

    def status_of(key: MigrationKey) -> Status:
        return statuses.get(key, Status.NOT_STARTED)

    for group in get_active_migration_groups():
        loader = get_group_loader(group)
        group_pending: List[MigrationKey] = []
        for migration_id in loader.get_migrations():
            key = MigrationKey(group, migration_id)
            status = status_of(key)
            if status == Status.IN_PROGRESS:
                raise MigrationInProgress(key)
            if status == Status.NOT_STARTED:
                group_pending.append(key)
            elif status == Status.COMPLETED and group_pending:
                # We should never have a completed migration after a pending one for that group
                missing_migrations = ", ".join(
                    k.migration_id for k in group_pending
                )
                raise InvalidMigrationState(
                    f"Missing migrations: {missing_migrations}"
                )
        pending.extend(group_pending)

    return pending
def show_all(self) -> List[Tuple[MigrationGroup, List[MigrationDetails]]]:
    """
    Returns the list of migrations and their statuses for each group.
    """
    result: List[Tuple[MigrationGroup, List[MigrationDetails]]] = []
    statuses = self._get_migration_status()

    def status_of(key: MigrationKey) -> Status:
        return statuses.get(key, Status.NOT_STARTED)

    for group in get_active_migration_groups():
        loader = get_group_loader(group)
        details: List[MigrationDetails] = []
        for migration_id in loader.get_migrations():
            key = MigrationKey(group, migration_id)
            migration = loader.load_migration(migration_id)
            details.append(
                MigrationDetails(migration_id, status_of(key), migration.blocking)
            )
        result.append((group, details))

    return result
def _reverse_migration_impl(
    self, migration_key: MigrationKey, *, dry_run: bool = False
) -> None:
    # Build a context that lets the migration update its own status
    # while it runs backwards.
    migration_id = migration_key.migration_id
    ctx = Context(
        migration_id,
        logger,
        partial(self._update_migration_status, migration_key),
    )
    loader = get_group_loader(migration_key.group)
    loader.load_migration(migration_id).backwards(ctx, dry_run)
def test_groupedmessages_compatibility() -> None:
    cluster = get_cluster(StorageSetKey.EVENTS)

    # Ignore the multi node mode because this tests a migration
    # for an older table state that only applied to single node
    if not cluster.is_single_node():
        return

    database = cluster.get_database()
    connection = cluster.get_query_connection(ClickhouseClientSettings.MIGRATE)

    # Create old style table without project ID
    connection.execute("""
        CREATE TABLE groupedmessage_local (`offset` UInt64, `record_deleted` UInt8,
        `id` UInt64, `status` Nullable(UInt8), `last_seen` Nullable(DateTime),
        `first_seen` Nullable(DateTime), `active_at` Nullable(DateTime),
        `first_release_id` Nullable(UInt64)) ENGINE = ReplacingMergeTree(offset)
        ORDER BY id SAMPLE BY id SETTINGS index_granularity = 8192
        """)

    migration_id = "0010_groupedmessages_onpremise_compatibility"

    runner = Runner()
    runner.run_migration(MigrationKey(MigrationGroup.SYSTEM, "0001_migrations"))
    events_migrations = get_group_loader(MigrationGroup.EVENTS).get_migrations()

    # Mark prior migrations complete
    for migration in events_migrations[: events_migrations.index(migration_id)]:
        runner._update_migration_status(
            MigrationKey(MigrationGroup.EVENTS, migration), Status.COMPLETED
        )

    runner.run_migration(
        MigrationKey(MigrationGroup.EVENTS, migration_id), force=True,
    )

    # The migration must have rewritten the primary key to include project_id.
    outcome = perform_select_query(
        ["primary_key"],
        "system.tables",
        {"name": "groupedmessage_local", "database": str(database)},
        None,
        connection,
    )
    assert outcome == [("project_id, id",)]
def _run_migration_impl(
    self, migration_key: MigrationKey, *, force: bool = False, dry_run: bool = False
) -> None:
    migration_id = migration_key.migration_id
    ctx = Context(
        migration_id,
        logger,
        partial(self._update_migration_status, migration_key),
    )
    migration = get_group_loader(migration_key.group).load_migration(migration_id)
    # Blocking migrations require an explicit force unless this is a dry run.
    if migration.blocking and not dry_run and not force:
        raise MigrationError("Blocking migrations must be run with force")
    migration.forwards(ctx, dry_run)
def reverse_migration(
    self,
    migration_key: MigrationKey,
    *,
    force: bool = False,
    fake: bool = False,
    dry_run: bool = False,
) -> None:
    """
    Reverses a migration.
    """
    if not dry_run:
        assert_single_node()

    migration_group, migration_id = migration_key
    group_migrations = get_group_loader(migration_group).get_migrations()
    if migration_id not in group_migrations:
        raise MigrationError("Invalid migration")

    if dry_run:
        # Dry runs skip all status bookkeeping and checks.
        self._reverse_migration_impl(migration_key, dry_run=True)
        return

    statuses = self._get_migration_status()

    def status_of(key: MigrationKey) -> Status:
        return statuses.get(key, Status.NOT_STARTED)

    current = status_of(migration_key)
    if current == Status.NOT_STARTED:
        raise MigrationError(
            "You cannot reverse a migration that has not been run")
    if current == Status.COMPLETED and not (force or fake):
        raise MigrationError(
            "You must use force to revert an already completed migration")

    # All later migrations in the group must still be untouched.
    for later_id in group_migrations[group_migrations.index(migration_id) + 1:]:
        if status_of(MigrationKey(migration_group, later_id)) != Status.NOT_STARTED:
            raise MigrationError("Subsequent migrations must be reversed first")

    if fake:
        self._update_migration_status(migration_key, Status.NOT_STARTED)
    else:
        self._reverse_migration_impl(migration_key)
def add_node(
    self,
    node_type: ClickhouseNodeType,
    storage_sets: Sequence[StorageSetKey],
    host_name: str,
    port: int,
    user: str,
    password: str,
    database: str,
) -> None:
    """
    Bootstraps a newly added ClickHouse node by replaying the forwards
    operations of every migration that touches the given storage sets.
    """
    client_settings = ClickhouseClientSettings.MIGRATE.value
    clickhouse = ClickhousePool(
        host_name,
        port,
        user,
        password,
        database,
        client_settings=client_settings.settings,
        send_receive_timeout=client_settings.timeout,
    )

    # Collect every migration across all active groups.
    all_migrations: List[Migration] = []
    for group in get_active_migration_groups():
        loader = get_group_loader(group)
        all_migrations.extend(
            loader.load_migration(migration_id)
            for migration_id in loader.get_migrations()
        )

    for migration in all_migrations:
        if isinstance(migration, ClickhouseNodeMigration):
            if node_type == ClickhouseNodeType.LOCAL:
                operations = migration.forwards_local()
            else:
                operations = migration.forwards_dist()
            for sql_op in operations:
                # Only SQL operations belonging to the requested storage
                # sets are executed on the new node.
                if isinstance(sql_op, SqlOperation) and sql_op._storage_set in storage_sets:
                    sql = sql_op.format_sql()
                    print(f"Executing {sql}")
                    clickhouse.execute(sql)
        elif isinstance(migration, CodeMigration):
            for python_op in migration.forwards_global():
                python_op.execute_new_node(storage_sets)
def run_migration(
    self,
    migration_key: MigrationKey,
    *,
    force: bool = False,
    fake: bool = False,
    dry_run: bool = False,
) -> None:
    """
    Run a single migration given its migration key and marks the migration as complete.

    Blocking migrations must be run with force.

    Raises MigrationError if the migration id is unknown, if it has
    already been started or completed, or if an earlier migration in the
    group is not yet completed.
    """
    if not dry_run:
        assert_single_node()

    migration_group, migration_id = migration_key
    group_migrations = get_group_loader(migration_group).get_migrations()
    if migration_id not in group_migrations:
        raise MigrationError("Could not find migration in group")

    if dry_run:
        # Dry runs bypass all status bookkeeping.
        self._run_migration_impl(migration_key, dry_run=True)
        return

    migration_status = self._get_migration_status()

    def get_status(migration_key: MigrationKey) -> Status:
        return migration_status.get(migration_key, Status.NOT_STARTED)

    if get_status(migration_key) != Status.NOT_STARTED:
        status_text = get_status(migration_key).value
        raise MigrationError(f"Migration is already {status_text}")

    # Migrations within a group must be applied in order.
    for m in group_migrations[: group_migrations.index(migration_id)]:
        if get_status(MigrationKey(migration_group, m)) != Status.COMPLETED:
            # Fixed typo in the error message ("ned" -> "need").
            raise MigrationError("Earlier migrations need to be completed first")

    if fake:
        # Only mark the status; do not execute the migration.
        self._update_migration_status(migration_key, Status.COMPLETED)
    else:
        self._run_migration_impl(migration_key, force=force)
def run_all(self, *, force: bool = False) -> None:
    """
    Run all pending migrations. Throws an error if any migration is in progress.

    Requires force to run blocking migrations.
    """
    pending = self._get_pending_migrations()

    if not force:
        # Refuse to proceed if any pending migration is blocking.
        for key in pending:
            migration = get_group_loader(key.group).load_migration(key.migration_id)
            if migration.blocking:
                raise MigrationError("Requires force to run blocking migrations")

    for key in pending:
        self._run_migration_impl(key, force=force)
def test_reverse_migration() -> None:
    runner = Runner()
    runner.run_all(force=True)

    connection = get_cluster(StorageSetKey.MIGRATIONS).get_query_connection(
        ClickhouseClientSettings.MIGRATE
    )

    # Invalid migration ID
    with pytest.raises(MigrationError):
        runner.reverse_migration(MigrationKey(MigrationGroup.SYSTEM, "xxx"))

    # Reversing out of order must be rejected.
    with pytest.raises(MigrationError):
        runner.reverse_migration(MigrationKey(MigrationGroup.EVENTS, "0003_errors"))

    # Reverse with --fake
    events_migrations = get_group_loader(MigrationGroup.EVENTS).get_migrations()
    for migration_id in reversed(events_migrations):
        runner.reverse_migration(
            MigrationKey(MigrationGroup.EVENTS, migration_id), fake=True
        )
    assert (
        len(connection.execute("SHOW TABLES LIKE 'sentry_local'")) == 1
    ), "Table still exists"
def test_groupedmessages_compatibility() -> None:
    cluster = get_cluster(StorageSetKey.EVENTS)
    database = cluster.get_database()
    connection = cluster.get_query_connection(ClickhouseClientSettings.MIGRATE)

    # Create old style table without project ID
    connection.execute("""
        CREATE TABLE groupedmessage_local (`offset` UInt64, `record_deleted` UInt8,
        `id` UInt64, `status` Nullable(UInt8), `last_seen` Nullable(DateTime),
        `first_seen` Nullable(DateTime), `active_at` Nullable(DateTime),
        `first_release_id` Nullable(UInt64)) ENGINE = ReplacingMergeTree(offset)
        ORDER BY id SAMPLE BY id SETTINGS index_granularity = 8192
        """)

    migration_id = "0010_groupedmessages_onpremise_compatibility"

    runner = Runner()
    runner.run_migration(MigrationKey(MigrationGroup.SYSTEM, "0001_migrations"))
    events_migrations = get_group_loader(MigrationGroup.EVENTS).get_migrations()

    # Mark prior migrations complete
    for migration in events_migrations[: events_migrations.index(migration_id)]:
        runner._update_migration_status(
            MigrationKey(MigrationGroup.EVENTS, migration), Status.COMPLETED
        )

    runner.run_migration(
        MigrationKey(MigrationGroup.EVENTS, migration_id), force=True,
    )

    # The migration must have rewritten the primary key to include project_id.
    assert connection.execute(
        f"SELECT primary_key FROM system.tables WHERE name = 'groupedmessage_local' AND database = '{database}'"
    ) == [("project_id, id",)]
def get_total_migration_count() -> int:
    """Returns the total number of migrations across every migration group."""
    return sum(
        len(get_group_loader(group).get_migrations()) for group in MigrationGroup
    )
def get_total_migration_count() -> int:
    """Returns the total number of migrations across all active groups."""
    return sum(
        len(get_group_loader(group).get_migrations())
        for group in get_active_migration_groups()
    )
def test_load_all_migrations() -> None:
    # Every declared migration in every group must be loadable.
    for group in MigrationGroup:
        loader = get_group_loader(group)
        for migration_id in loader.get_migrations():
            loader.load_migration(migration_id)