def handle(self, *args, **options):
    """Apply every pending async migration, or (with --plan/--check) just report them.

    With --check, exits with status 1 when migrations are pending; with --plan,
    prints the pending list and returns. Otherwise runs each migration in order
    and raises ImproperlyConfigured on the first one that fails to complete.
    """
    setup_async_migrations(ignore_posthog_version=True)
    pending = get_necessary_migrations()

    if options["plan"] or options["check"]:
        print()
        if not pending:
            print("Async migrations up to date!")
            return
        print("List of async migrations to be applied:")
        for pending_migration in pending:
            print(f"- {pending_migration.name}")
        print()
        if options["check"]:
            # Non-zero exit signals "migrations pending" to CI / deploy tooling.
            exit(1)
        return

    for pending_migration in pending:
        logger.info(f"Applying async migration {pending_migration.name}")
        started_ok = start_async_migration(pending_migration.name, ignore_posthog_version=True)
        # Reload so we see the status the migration runner persisted.
        pending_migration.refresh_from_db()
        completed = started_ok and pending_migration.status == MigrationStatus.CompletedSuccessfully
        if not completed:
            # Surface the most recent recorded error, if any, before failing the job.
            last_error = AsyncMigrationError.objects.filter(async_migration=pending_migration).last()
            last_error_msg = f", last error: {last_error.description}" if last_error else ""
            logger.info(f"Unable to complete async migration {pending_migration.name}{last_error_msg}.")
            raise ImproperlyConfigured(
                f"Migrate job failed because necessary async migration {pending_migration.name} could not complete."
            )
        logger.info(f"✅ Migration {pending_migration.name} successful")
def test_migration(self):
    """End-to-end check that the backfill keeps only the latest, non-deleted distinct_id rows."""
    from posthog.client import sync_execute

    persons = [UUID(int=i) for i in range(6)]
    p1, p2, p3, p4, p5, p6 = persons

    # Plain mappings in two different teams sharing a distinct_id.
    self.create_distinct_id(team_id=1, distinct_id="a", person_id=str(p1), sign=1)
    self.create_distinct_id(team_id=2, distinct_id="a", person_id=str(p2), sign=1)

    # Merged user: p3's row is cancelled (+1 then -1), p4 is the surviving person.
    self.create_distinct_id(team_id=2, distinct_id="b", person_id=str(p3), sign=1)
    self.create_distinct_id(team_id=2, distinct_id="b", person_id=str(p3), sign=-1)
    self.create_distinct_id(team_id=2, distinct_id="b", person_id=str(p4), sign=1)

    # Deleted user: fully cancelled, so "c" should not appear after migration.
    self.create_distinct_id(team_id=2, distinct_id="c", person_id=str(p5), sign=1)
    self.create_distinct_id(team_id=2, distinct_id="c", person_id=str(p5), sign=-1)

    self.create_distinct_id(team_id=3, distinct_id="d", person_id=str(p6), sign=1)

    setup_async_migrations()
    migration_successful = start_async_migration(MIGRATION_NAME)
    self.assertTrue(migration_successful)

    rows = sync_execute(
        "SELECT team_id, distinct_id, person_id, version FROM person_distinct_id2 ORDER BY team_id, distinct_id"
    )
    self.assertEqual(rows, [(1, "a", p1, 0), (2, "a", p2, 0), (2, "b", p4, 0), (3, "d", p6, 0)])
def test_migration(self):
    """Migrating to replicated tables must preserve existing event rows."""
    # :TRICKY: Relies on tables being migrated as unreplicated before.
    for distinct_id in ("test", "test2"):
        _create_event(team=self.team, distinct_id=distinct_id, event="$pageview")
    settings.CLICKHOUSE_REPLICATION = True

    setup_async_migrations()
    self.assertTrue(start_async_migration(MIGRATION_NAME))

    self.verify_table_engines_correct()
    # Both pre-migration events survive the table swap.
    self.assertEqual(self.get_event_table_row_count(), 2)
def ready(self):
    """App startup hook: configure self-capture analytics, enforce service version
    requirements, and set up async migrations.

    NOTE(review): original source was whitespace-collapsed; nesting below is the
    best-effort reconstruction — confirm against upstream.
    """
    posthoganalytics.api_key = "sTMFPsFhdP1Ssg"
    posthoganalytics.personal_api_key = os.environ.get("POSTHOG_PERSONAL_API_KEY")
    # NOTE(review): env value is used truthily — OPT_OUT_CAPTURE="0" still opts out,
    # since any non-empty string is truthy. Presumably intentional; verify.
    if settings.TEST or os.environ.get("OPT_OUT_CAPTURE", False):
        posthoganalytics.disabled = True
    elif settings.DEBUG:  # log development server launch to posthog
        # RUN_MAIN == "true" only in the reloader's child process, so this fires once.
        if os.getenv("RUN_MAIN") == "true":
            # Sync all organization.available_features once on launch, in case plans changed
            from posthog.celery import sync_all_organization_available_features

            sync_all_organization_available_features()
            posthoganalytics.capture(
                get_machine_id(),
                "development server launched",
                {
                    "posthog_version": VERSION,
                    "git_rev": get_git_commit(),
                    "git_branch": get_git_branch(),
                },
            )
        if SELF_CAPTURE:
            # Point the analytics client at this very instance instead of app.posthog.com.
            posthoganalytics.api_key = get_self_capture_api_token(None)
        else:
            posthoganalytics.disabled = True

    if not settings.SKIP_SERVICE_VERSION_REQUIREMENTS:
        for service_version_requirement in settings.SERVICE_VERSION_REQUIREMENTS:
            in_range, version = service_version_requirement.is_service_in_accepted_version()
            if not in_range:
                # Hard-stops the process: an unsupported dependency version is fatal
                # unless SKIP_SERVICE_VERSION_REQUIREMENTS=1 is set.
                print(
                    f"\033[91mService {service_version_requirement.service} is in version {version}. Expected range: {str(service_version_requirement.supported_version)}. PostHog may not work correctly with the current version. To continue anyway, add SKIP_SERVICE_VERSION_REQUIREMENTS=1 as an environment variable\033[0m",
                )
                exit(1)

    # Imported here (not at module top) — presumably to avoid import-time side
    # effects before Django app registry is ready; confirm.
    from posthog.async_migrations.setup import setup_async_migrations

    if SKIP_ASYNC_MIGRATIONS_SETUP:
        print_warning(["Skipping async migrations setup. This is unsafe in production!"])
    else:
        setup_async_migrations()
def test_rollback(self):
    """A failing operation mid-migration must roll tables back to their original engines."""
    # :TRICKY: Relies on tables being migrated as unreplicated before.
    for distinct_id in ("test", "test2"):
        _create_event(team=self.team, distinct_id=distinct_id, event="$pageview")
    settings.CLICKHOUSE_REPLICATION = True

    setup_async_migrations()
    migration = get_async_migration_definition(MIGRATION_NAME)
    self.assertEqual(len(migration.operations), 53)
    # Sabotage an operation in the middle so the run fails and triggers rollback.
    migration.operations[30].sql = "THIS WILL FAIL!"  # type: ignore

    self.assertFalse(start_async_migration(MIGRATION_NAME))

    self.assertEqual(AsyncMigration.objects.get(name=MIGRATION_NAME).status, MigrationStatus.RolledBack)
    # Engines are back to the unreplicated set after rollback.
    self.verify_table_engines_correct(
        expected_engine_types=("ReplacingMergeTree", "CollapsingMergeTree", "Kafka")
    )
def test_migration(self):
    """Successful migration switches tables to replicated engines without losing rows."""
    # :TRICKY: Relies on tables being migrated as unreplicated before.
    for distinct_id in ("test", "test2"):
        _create_event(team=self.team, distinct_id=distinct_id, event="$pageview")
    settings.CLICKHOUSE_REPLICATION = True

    setup_async_migrations()
    self.assertTrue(start_async_migration(MIGRATION_NAME))

    expected = (
        "ReplicatedReplacingMergeTree",
        "ReplicatedCollapsingMergeTree",
        "Distributed",
        "Kafka",
    )
    self.verify_table_engines_correct(expected_engine_types=expected)
    # Both pre-migration events survive the engine swap.
    self.assertEqual(self.get_event_table_row_count(), 2)