Ejemplo n.º 1
0
    def test_enums_match_schema(self) -> None:
        """Verifies that enums produced by migrations match the declarative schema."""
        # Bring the migration-managed database fully up to date.
        with runner(self.default_config(), self.engine) as r:
            r.migrate_up_to("head")

        migration_enums = self.fetch_all_enums()

        # Tear down and recreate a fresh local postgres instance so the
        # declarative schema can be loaded without any migration history.
        local_postgres_helpers.restore_local_env_vars(self.overridden_env_vars)
        local_postgres_helpers.stop_and_clear_on_disk_postgresql_database(
            self.db_dir)
        self.db_dir = local_postgres_helpers.start_on_disk_postgresql_database()
        self.overridden_env_vars = (
            local_postgres_helpers.update_local_sqlalchemy_postgres_env_vars())
        local_postgres_helpers.use_on_disk_postgresql_database(
            self.database_key)

        schema_enums = self.fetch_all_enums()

        # Same number of enums, and each enum holds the same values.
        self.assertEqual(len(migration_enums), len(schema_enums))
        for name, values_from_migrations in migration_enums.items():
            self.assertCountEqual(values_from_migrations, schema_enums[name])

        # This test replaced the database mid-run, so it must clean up itself.
        local_postgres_helpers.teardown_on_disk_postgresql_database(
            self.database_key)
Ejemplo n.º 2
0
def main(database: SchemaType, repo_root: str, ssl_cert_path: str) -> None:
    """
    Invokes the main code path for running a downgrade.

    This checks for user validations that the database and branches are correct and then runs the downgrade
    migration.
    """
    is_prod = metadata.project_id() == GCP_PROJECT_PRODUCTION
    if is_prod:
        logging.info("RUNNING AGAINST PRODUCTION\n")

    # Require explicit user confirmation before touching the database.
    prompt_for_confirmation("This script will run a DOWNGRADE migration.", "DOWNGRADE")
    confirm_correct_db(database)
    confirm_correct_git_branch(repo_root, is_prod=is_prod)

    overridden_env_vars = SQLAlchemyEngineManager.update_sqlalchemy_env_vars(
        database,
        ssl_cert_path=ssl_cert_path,
        migration_user=True,
    )

    try:
        # Step back exactly one alembic revision.
        alembic_config = alembic.config.Config(
            SQLAlchemyEngineManager.get_alembic_file(database))
        alembic.command.downgrade(alembic_config, "-1")
    except Exception as e:
        logging.error("Downgrade failed to run: %s", e)
        sys.exit(1)
    finally:
        # Always put the original env vars back, even on failure.
        local_postgres_helpers.restore_local_env_vars(overridden_env_vars)
Ejemplo n.º 3
0
    def test_enums_match_schema(self):
        """Checks that migration-generated enums agree with schema-generated enums."""
        with runner(self.default_config()) as r:
            r.migrate_up_to('head')

        migration_enums = self.fetch_all_enums()

        # Recreate a fresh local postgres instance so the declarative schema
        # can be loaded directly, without running migrations first.
        local_postgres_helpers.restore_local_env_vars(self.overridden_env_vars)
        local_postgres_helpers.stop_and_clear_on_disk_postgresql_database(
            self.db_dir)
        self.db_dir = local_postgres_helpers.start_on_disk_postgresql_database()
        self.overridden_env_vars = (
            local_postgres_helpers.update_local_sqlalchemy_postgres_env_vars())
        local_postgres_helpers.use_on_disk_postgresql_database(
            SQLAlchemyEngineManager.declarative_method_for_schema(
                self.schema_type))

        schema_enums = self.fetch_all_enums()

        # Same enum count; then each enum must have identical value sets.
        self.assertEqual(len(migration_enums), len(schema_enums))
        for enum_name, migration_values in migration_enums.items():
            schema_values = schema_enums[enum_name]
            self.assertEqual(
                len(migration_values), len(schema_values),
                msg=f'{enum_name} lengths differ')
            # Equal sizes plus a full intersection implies set equality.
            self.assertEqual(
                len(migration_values),
                len(migration_values.intersection(schema_values)),
                msg=f'{enum_name} values differ')
Ejemplo n.º 4
0
def main(schema_type: SchemaType, message: str, use_local_db: bool) -> None:
    """Runs the script to autogenerate migrations.

    Args:
        schema_type: The schema to autogenerate migrations for.
        message: The revision message passed through to alembic.
        use_local_db: If True, runs against a fresh local on-disk postgres
            instance; otherwise runs against the remote database (which
            currently only works for production — see TODO below).
    """
    database_key = SQLAlchemyDatabaseKey.canonical_for_schema(schema_type)
    # Initialized up front so cleanup below is always well-defined.
    tmp_db_dir = None
    if use_local_db:
        # TODO(#4619): We should eventually move this from a local postgres instance to running
        # postgres from a docker container.
        if not local_postgres_helpers.can_start_on_disk_postgresql_database():
            logging.error(
                "pg_ctl is not installed, so the script cannot be run locally. "
                "--project-id must be specified to run against staging or production."
            )
            logging.error("Exiting...")
            sys.exit(1)
        logging.info("Starting local postgres database for autogeneration...")
        tmp_db_dir = local_postgres_helpers.start_on_disk_postgresql_database()
        original_env_vars = (
            local_postgres_helpers.update_local_sqlalchemy_postgres_env_vars())
    else:
        # TODO(Recidiviz/zenhub-tasks#134): This code path will throw when pointed at staging
        # because we haven't created valid read-only users there just yet.
        try:
            original_env_vars = SQLAlchemyEngineManager.update_sqlalchemy_env_vars(
                database_key=database_key, readonly_user=True)
        except ValueError as e:
            logging.warning("Error fetching SQLAlchemy credentials: %s", e)
            logging.warning(
                "Until readonly users are created, we cannot autogenerate migrations against staging."
            )
            logging.warning(
                "See https://github.com/Recidiviz/zenhub-tasks/issues/134")
            sys.exit(1)

    try:
        config = alembic.config.Config(database_key.alembic_file)
        if use_local_db:
            # Bring the scratch database up to date so autogenerate diffs
            # only the changes not yet captured in a migration.
            upgrade(config, "head")
        revision(config, autogenerate=True, message=message)
    except Exception as e:
        logging.error("Automigration generation failed: %s", e)
    finally:
        # Cleanup now runs even if generation raises something other than
        # Exception (e.g. KeyboardInterrupt), so env vars are restored and
        # the scratch database is never left running.
        local_postgres_helpers.restore_local_env_vars(original_env_vars)
        if use_local_db:
            logging.info("Stopping local postgres database...")
            local_postgres_helpers.stop_and_clear_on_disk_postgresql_database(
                tmp_db_dir)
Ejemplo n.º 5
0
def _get_old_enum_values(schema_type: SchemaType, enum_name: str) -> List[str]:
    """Fetches the current enum values for the given schema and enum name.

    Spins up a temporary on-disk postgres instance, runs all migrations for
    the schema against it, and reads the resulting values of the named enum
    type from the postgres catalog. The temporary instance and env-var
    overrides are always torn down, even on failure.
    """
    # Setup temp pg database
    db_dir = local_postgres_helpers.start_on_disk_postgresql_database()
    database_key = SQLAlchemyDatabaseKey.canonical_for_schema(schema_type)
    overridden_env_vars = (
        local_postgres_helpers.update_local_sqlalchemy_postgres_env_vars())
    engine = create_engine(
        local_postgres_helpers.postgres_db_url_from_env_vars())

    try:
        # Run every migration so the enum reflects the fully-migrated state.
        default_config = {
            "file": database_key.alembic_file,
            "script_location": database_key.migrations_location,
        }
        with runner(default_config, engine) as r:
            r.migrate_up_to("head")
        # Use a context manager so the connection is closed deterministically
        # instead of being leaked until garbage collection.
        with engine.connect() as conn:
            # NOTE: enum_name is interpolated directly into the SQL below.
            # This is tolerable only because it comes from trusted script
            # input — never pass untrusted values here.
            rows = conn.execute(f"""
        SELECT e.enumlabel as enum_value
        FROM pg_type t
            JOIN pg_enum e ON t.oid = e.enumtypid
            JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
        WHERE
            n.nspname = 'public'
            AND t.typname = '{enum_name}';
        """)
            enums = [row[0] for row in rows]
    finally:
        # Teardown temp pg database
        local_postgres_helpers.restore_local_env_vars(overridden_env_vars)
        local_postgres_helpers.stop_and_clear_on_disk_postgresql_database(
            db_dir)

    return enums
Ejemplo n.º 6
0
 def tearDown(self) -> None:
     """Drops the on-disk test database, then restores the original env vars.

     NOTE(review): this ordering (db teardown before env-var restore) is the
     reverse of sibling tearDown implementations — confirm it is intentional.
     """
     local_postgres_helpers.teardown_on_disk_postgresql_database(self.db_key)
     local_postgres_helpers.restore_local_env_vars(self.env_vars)
Ejemplo n.º 7
0
 def tearDown(self) -> None:
     """Restores env vars, then tears down the CASE_TRIAGE on-disk database."""
     local_postgres_helpers.restore_local_env_vars(self.overridden_env_vars)
     case_triage_key = SQLAlchemyDatabaseKey.for_schema(SchemaType.CASE_TRIAGE)
     local_postgres_helpers.teardown_on_disk_postgresql_database(case_triage_key)
Ejemplo n.º 8
0
 def tearDown(self) -> None:
     """Restores env vars, then stops and clears the on-disk postgres db."""
     local_postgres_helpers.restore_local_env_vars(self.overridden_env_vars)
     local_postgres_helpers.stop_and_clear_on_disk_postgresql_database(self.db_dir)
Ejemplo n.º 9
0
def main(
    state_code: StateCode,
    database_version: SQLAlchemyStateDatabaseVersion,
    ssl_cert_path: str,
    purge_schema: bool,
) -> None:
    """
    Purges all database state for the given state code.

    After interactive user confirmation, truncates the state tables
    (state_person and state_agent, with CASCADE) in the selected database.
    If purge_schema is True, additionally runs all alembic DOWNGRADE
    migrations back to base and drops the leftover alembic_version table.
    """
    # TODO(#7984): Once we have cut all traffic over to single-database traffic,
    #   delete this branch.
    if database_version == SQLAlchemyStateDatabaseVersion.LEGACY:
        logging.error(
            "Should not invoke purge_state_db script with legacy database version."
        )
        sys.exit(1)

    is_prod = metadata.project_id() == GCP_PROJECT_PRODUCTION
    if is_prod:
        logging.info("RUNNING AGAINST PRODUCTION\n")

    # NOTE(review): any project other than staging is labeled "IN PROD" here,
    # including unrecognized projects — confirm that is intended.
    purge_str = ("PURGE DATABASE STATE IN STAGING" if metadata.project_id()
                 == GCP_PROJECT_STAGING else "PURGE DATABASE STATE IN PROD")
    # NOTE(review): the prompt below contains a doubled "for for" — left
    # untouched here because it is a runtime string.
    prompt_for_confirmation(
        f"This script will PURGE all data for for [{state_code.value}] in DB [{database_version.value}].",
        purge_str,
    )
    if purge_schema:
        # A second, separate confirmation is required before schema downgrade.
        purge_schema_str = ("RUN DOWNGRADE MIGRATIONS IN STAGING"
                            if metadata.project_id() == GCP_PROJECT_STAGING
                            else "RUN DOWNGRADE MIGRATIONS IN PROD")
        prompt_for_confirmation(
            f"This script will run all DOWNGRADE migrations for "
            f"[{state_code.value}] in DB [{database_version.value}].",
            purge_schema_str,
        )

    db_key = SQLAlchemyDatabaseKey.for_state_code(state_code, database_version)

    # Truncate data first; CASCADE clears dependent rows in related tables.
    with SessionFactory.for_prod_data_client(db_key, ssl_cert_path) as session:
        truncate_commands = [
            "TRUNCATE TABLE state_person CASCADE;",
            "TRUNCATE TABLE state_agent CASCADE;",
        ]
        for command in truncate_commands:
            logging.info('Running query ["%s"]. This may take a while...',
                         command)
            session.execute(command)

        logging.info("Done running truncate commands.")

    if purge_schema:
        with SessionFactory.for_prod_data_client(
                db_key, ssl_cert_path) as purge_session:
            # Tracked so `finally` only restores env vars that were actually
            # overridden before a failure.
            overriden_env_vars = None
            try:
                logging.info("Purging schema...")
                overriden_env_vars = SQLAlchemyEngineManager.update_sqlalchemy_env_vars(
                    database_key=db_key,
                    ssl_cert_path=ssl_cert_path,
                    migration_user=True,
                )
                config = alembic.config.Config(db_key.alembic_file)
                # Downgrade all the way to an empty schema.
                alembic.command.downgrade(config, "base")

                # We need to manually delete alembic_version because it's leftover after
                # the downgrade migrations
                purge_session.execute("DROP TABLE alembic_version;")
            finally:
                if overriden_env_vars:
                    local_postgres_helpers.restore_local_env_vars(
                        overriden_env_vars)

        logging.info("Purge complete.")
Ejemplo n.º 10
0
def main(
    schema_type: SchemaType,
    repo_root: str,
    ssl_cert_path: str,
    dry_run: bool,
    skip_db_name_check: bool,
    confirm_hash: Optional[str],
) -> None:
    """
    Invokes the main code path for running migrations.

    This checks for user validations that the database and branches are correct and then runs existing pending
    migrations.

    Args:
        schema_type: The schema whose database(s) should be migrated.
        repo_root: Path to the repo root, used to validate the git branch.
        ssl_cert_path: Path to SSL certs; required unless dry_run is set.
        dry_run: If True, runs migrations against a throwaway local postgres
            instance instead of the live databases.
        skip_db_name_check: If True, skips the interactive db confirmation.
        confirm_hash: Optional commit hash used to confirm the git branch.
    """
    if dry_run:
        if not local_postgres_helpers.can_start_on_disk_postgresql_database():
            logging.error("pg_ctl is not installed. Cannot perform a dry-run.")
            logging.error("Exiting...")
            sys.exit(1)
        logging.info("Creating a dry-run...\n")
    else:
        if not ssl_cert_path:
            logging.error(
                "SSL certificates are required when running against live databases"
            )
            logging.error("Exiting...")
            sys.exit(1)
        logging.info("Using SSL certificate path: %s", ssl_cert_path)

    is_prod = metadata.project_id() == GCP_PROJECT_PRODUCTION
    if is_prod:
        logging.info("RUNNING AGAINST PRODUCTION\n")

    if not skip_db_name_check:
        confirm_correct_db_instance(schema_type)
    confirm_correct_git_branch(repo_root, confirm_hash=confirm_hash)

    if dry_run:
        # Dry runs only exercise the canonical database for the schema.
        db_keys = [SQLAlchemyDatabaseKey.canonical_for_schema(schema_type)]
    else:
        db_keys = [
            key for key in SQLAlchemyDatabaseKey.all()
            if key.schema_type == schema_type
        ]

    # Run migrations
    for key in db_keys:
        if dry_run:
            overriden_env_vars = (local_postgres_helpers.
                                  update_local_sqlalchemy_postgres_env_vars())
        else:
            overriden_env_vars = SQLAlchemyEngineManager.update_sqlalchemy_env_vars(
                database_key=key,
                ssl_cert_path=ssl_cert_path,
                migration_user=True,
            )
        # Bug fix: previously db_dir was only bound inside `try`, so if
        # start_on_disk_postgresql_database() raised, the cleanup in
        # `finally` hit a NameError (swallowed by the inner except with a
        # misleading log message). Initialize it up front and guard below.
        db_dir = None
        try:
            logging.info(
                "*** Starting postgres migrations for schema [%s], db_name [%s] ***",
                key.schema_type,
                key.db_name,
            )
            if dry_run:
                db_dir = local_postgres_helpers.start_on_disk_postgresql_database()
            config = alembic.config.Config(key.alembic_file)
            alembic.command.upgrade(config, "head")
        except Exception as e:
            logging.error("Migrations failed to run: %s", e)
            sys.exit(1)
        finally:
            local_postgres_helpers.restore_local_env_vars(overriden_env_vars)
            if dry_run and db_dir is not None:
                try:
                    logging.info("Stopping local postgres database")
                    local_postgres_helpers.stop_and_clear_on_disk_postgresql_database(
                        db_dir)
                except Exception as e2:
                    logging.error("Error cleaning up postgres: %s", e2)
Ejemplo n.º 11
0
def main(database: SchemaType, repo_root: str, ssl_cert_path: str,
         dry_run: bool):
    """
    Invokes the main code path for running migrations.

    This checks for user validations that the database and branches are correct and then runs existing pending
    migrations.

    Args:
        database: The database to run migrations against.
        repo_root: Path to the repo root, used to validate the git branch.
        ssl_cert_path: Path to SSL certs, required for databases that need SSL.
        dry_run: If True, runs against a throwaway local postgres instance.
    """

    requires_ssl = SQLAlchemyEngineManager.database_requires_ssl(
        metadata.project_id())

    if requires_ssl and not ssl_cert_path:
        logging.error(
            'Specifying an argument to --ssl-cert-path is required for the specified database.'
        )
        logging.error('Exiting...')
        sys.exit(1)

    if dry_run:
        if not local_postgres_helpers.can_start_on_disk_postgresql_database():
            logging.error('pg_ctl is not installed. Cannot perform a dry-run.')
            logging.error('Exiting...')
            sys.exit(1)
        logging.info('Creating a dry-run...\n')

    is_prod = metadata.project_id() == GCP_PROJECT_PRODUCTION
    dbname = SQLAlchemyEngineManager.get_stripped_cloudsql_instance_id(
        database)
    if is_prod:
        logging.info('RUNNING AGAINST PRODUCTION\n')

    # Require the user to type the db name back to confirm the target.
    db_check = input(
        f'Running new migrations on {dbname}. Please type "{dbname}" to confirm. (Anything else exits):\n'
    )
    if db_check != dbname:
        logging.warning('\nConfirmation aborted.')
        sys.exit(1)

    # Get user to validate git branch
    try:
        repo = Repository(repo_root)
    except Exception as e:
        logging.error('improper project root provided: %s', e)
        sys.exit(1)

    current_branch = repo.head.shorthand

    # Production migrations may only be run from a release branch.
    if is_prod and not current_branch.startswith('releases/'):
        logging.error(
            'Migrations run against production must be from a release branch. The current branch is %s.',
            current_branch)
        sys.exit(1)

    branch_check = input(
        f'\nThis script will execute all migrations on this branch ({current_branch}) that have not yet been run '
        f'against {dbname}. Please type "{current_branch}" to confirm your branch. (Anything else exits):\n'
    )
    if branch_check != current_branch:
        logging.warning('\nConfirmation aborted.')
        sys.exit(1)

    # Fetch secrets
    if dry_run:
        overriden_env_vars = (
            local_postgres_helpers.update_local_sqlalchemy_postgres_env_vars())
    else:
        overriden_env_vars = SQLAlchemyEngineManager.update_sqlalchemy_env_vars(
            database, ssl_cert_path=ssl_cert_path)

    # Run migrations
    # Bug fix: previously db_dir was only bound inside `try`, so if
    # start_on_disk_postgresql_database() raised, the cleanup in `finally`
    # hit a NameError (swallowed by the inner except with a misleading log
    # message). Initialize it up front and guard below.
    db_dir = None
    try:
        if dry_run:
            logging.info(
                'Starting local postgres database for migrations dry run')
            db_dir = local_postgres_helpers.start_on_disk_postgresql_database()
        config = alembic.config.Config(
            SQLAlchemyEngineManager.get_alembic_file(database))
        alembic.command.upgrade(config, 'head')
    except Exception as e:
        logging.error('Migrations failed to run: %s', e)
        sys.exit(1)
    finally:
        local_postgres_helpers.restore_local_env_vars(overriden_env_vars)
        if dry_run and db_dir is not None:
            try:
                logging.info('Stopping local postgres database')
                local_postgres_helpers.stop_and_clear_on_disk_postgresql_database(
                    db_dir)
            except Exception as e2:
                logging.error('Error cleaning up postgres: %s', e2)
Ejemplo n.º 12
0
def main(
    database: SchemaType, repo_root: str, ssl_cert_path: str, dry_run: bool
) -> None:
    """
    Invokes the main code path for running migrations.

    This checks for user validations that the database and branches are correct and then runs existing pending
    migrations.

    Args:
        database: The database to run migrations against.
        repo_root: Path to the repo root, used to validate the git branch.
        ssl_cert_path: Path to SSL certs; required unless dry_run is set.
        dry_run: If True, runs against a throwaway local postgres instance.
    """
    if dry_run:
        if not local_postgres_helpers.can_start_on_disk_postgresql_database():
            logging.error("pg_ctl is not installed. Cannot perform a dry-run.")
            logging.error("Exiting...")
            sys.exit(1)
        logging.info("Creating a dry-run...\n")
    else:
        if not ssl_cert_path:
            logging.error(
                "SSL certificates are required when running against live databases"
            )
            logging.error("Exiting...")
            sys.exit(1)
        logging.info("Using SSL certificate path: %s", ssl_cert_path)

    is_prod = metadata.project_id() == GCP_PROJECT_PRODUCTION
    if is_prod:
        logging.info("RUNNING AGAINST PRODUCTION\n")

    confirm_correct_db(database)
    confirm_correct_git_branch(repo_root, is_prod=is_prod)

    if dry_run:
        overriden_env_vars = (
            local_postgres_helpers.update_local_sqlalchemy_postgres_env_vars()
        )
    else:
        overriden_env_vars = SQLAlchemyEngineManager.update_sqlalchemy_env_vars(
            database,
            ssl_cert_path=ssl_cert_path,
            migration_user=True,
        )

    # Run migrations
    # Bug fix: previously db_dir was only bound inside `try`, so if
    # start_on_disk_postgresql_database() raised, the cleanup in `finally`
    # hit a NameError (swallowed by the inner except with a misleading log
    # message). Initialize it up front and guard below.
    db_dir = None
    try:
        if dry_run:
            logging.info("Starting local postgres database for migrations dry run")
            db_dir = local_postgres_helpers.start_on_disk_postgresql_database()
        config = alembic.config.Config(
            SQLAlchemyEngineManager.get_alembic_file(database)
        )
        alembic.command.upgrade(config, "head")
    except Exception as e:
        logging.error("Migrations failed to run: %s", e)
        sys.exit(1)
    finally:
        local_postgres_helpers.restore_local_env_vars(overriden_env_vars)
        if dry_run and db_dir is not None:
            try:
                logging.info("Stopping local postgres database")
                local_postgres_helpers.stop_and_clear_on_disk_postgresql_database(
                    db_dir
                )
            except Exception as e2:
                logging.error("Error cleaning up postgres: %s", e2)
Ejemplo n.º 13
0
 def tearDown(self) -> None:
     """Restores env vars, then tears down the CaseTriageBase on-disk database."""
     local_postgres_helpers.restore_local_env_vars(self.overridden_env_vars)
     local_postgres_helpers.teardown_on_disk_postgresql_database(CaseTriageBase)