Example #1
    def test_enums_match_schema(self) -> None:
        with runner(self.default_config(), self.engine) as r:
            r.migrate_up_to("head")

        # Fetch enum values
        migration_enums = self.fetch_all_enums()

        # Doing teardown/setup to generate a new postgres instance
        local_postgres_helpers.restore_local_env_vars(self.overridden_env_vars)
        local_postgres_helpers.stop_and_clear_on_disk_postgresql_database(
            self.db_dir)

        self.db_dir = local_postgres_helpers.start_on_disk_postgresql_database()
        self.overridden_env_vars = (
            local_postgres_helpers.update_local_sqlalchemy_postgres_env_vars())

        local_postgres_helpers.use_on_disk_postgresql_database(
            self.database_key)

        # Check enum values
        schema_enums = self.fetch_all_enums()

        # Assert that they all match
        self.assertEqual(len(migration_enums), len(schema_enums))
        for enum_name, migration_values in migration_enums.items():
            schema_values = schema_enums[enum_name]
            self.assertCountEqual(migration_values, schema_values)

        # Cleanup needed for this method.
        local_postgres_helpers.teardown_on_disk_postgresql_database(
            self.database_key)
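
Both enum snapshots above come from a fetch_all_enums helper that is not shown in this snippet. A minimal sketch of what it could look like, assuming a self.engine attribute and modeled on the pg_enum query in Example #8 below:

    def fetch_all_enums(self) -> Dict[str, Set[str]]:
        # Hypothetical reconstruction. Assumes module-level imports:
        # from collections import defaultdict; from typing import Dict, Set.
        # Maps each enum type in the public schema to its set of labels,
        # using the same pg_catalog join as Example #8 below.
        enums: Dict[str, Set[str]] = defaultdict(set)
        with self.engine.connect() as conn:
            rows = conn.execute("""
            SELECT t.typname AS enum_name, e.enumlabel AS enum_value
            FROM pg_type t
                JOIN pg_enum e ON t.oid = e.enumtypid
                JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
            WHERE n.nspname = 'public';
            """)
            for enum_name, enum_value in rows:
                enums[enum_name].add(enum_value)
        return dict(enums)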
Example #2
    def setUp(self) -> None:
        self.test_app = Flask(__name__)
        self.helpers = CaseTriageTestHelpers.from_test(self, self.test_app)

        self.database_key = SQLAlchemyDatabaseKey.for_schema(
            SchemaType.CASE_TRIAGE)
        self.overridden_env_vars = (
            local_postgres_helpers.update_local_sqlalchemy_postgres_env_vars())
        db_url = local_postgres_helpers.postgres_db_url_from_env_vars()
        engine = setup_scoped_sessions(self.test_app, db_url)
        # Auto-generate all tables that exist in our schema in this database
        self.database_key.declarative_meta.metadata.create_all(engine)

        self.demo_clients = get_fixture_clients()
        self.demo_opportunities = get_fixture_opportunities()

        self.client_1 = self.demo_clients[0]

        with self.helpers.using_demo_user():
            self.helpers.create_case_update(
                self.client_1.person_external_id,
                CaseUpdateActionType.COMPLETED_ASSESSMENT.value,
            )
Example #3
    def test_enums_match_schema(self):
        with runner(self.default_config()) as r:
            r.migrate_up_to('head')

        # Fetch enum values
        migration_enums = self.fetch_all_enums()

        # Doing teardown/setup to generate a new postgres instance
        local_postgres_helpers.restore_local_env_vars(self.overridden_env_vars)
        local_postgres_helpers.stop_and_clear_on_disk_postgresql_database(
            self.db_dir)

        self.db_dir = local_postgres_helpers.start_on_disk_postgresql_database()
        self.overridden_env_vars = (
            local_postgres_helpers.update_local_sqlalchemy_postgres_env_vars())

        local_postgres_helpers.use_on_disk_postgresql_database(
            SQLAlchemyEngineManager.declarative_method_for_schema(
                self.schema_type))

        # Check enum values
        schema_enums = self.fetch_all_enums()

        # Assert that they all match
        self.assertEqual(len(migration_enums), len(schema_enums))
        for enum_name, migration_values in migration_enums.items():
            schema_values = schema_enums[enum_name]
            self.assertEqual(len(migration_values),
                             len(schema_values),
                             msg=f'{enum_name} lengths differ')
            self.assertEqual(len(migration_values),
                             len(migration_values.intersection(schema_values)),
                             msg=f'{enum_name} values differ')
Example #4
    def setUp(self) -> None:
        self.db_dir = local_postgres_helpers.start_on_disk_postgresql_database()
        self.overridden_env_vars = (
            local_postgres_helpers.update_local_sqlalchemy_postgres_env_vars())
        self.engine = create_engine(
            local_postgres_helpers.postgres_db_url_from_env_vars())
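
Classes that start a database this way tear it down symmetrically. A minimal sketch of the matching tearDown, mirroring the cleanup calls at the end of Example #1:

    def tearDown(self) -> None:
        # Undo the env-var overrides, then stop and delete the temporary
        # postgres instance created in setUp.
        local_postgres_helpers.restore_local_env_vars(self.overridden_env_vars)
        local_postgres_helpers.stop_and_clear_on_disk_postgresql_database(
            self.db_dir)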
Example #5
    def setUp(self) -> None:
        self.db_dir = local_postgres_helpers.start_on_disk_postgresql_database()
        self.database_key = SQLAlchemyDatabaseKey.canonical_for_schema(
            self.schema_type)
        self.overridden_env_vars = (
            local_postgres_helpers.update_local_sqlalchemy_postgres_env_vars())
        self.engine = create_engine(
            local_postgres_helpers.postgres_db_url_from_env_vars())
Example #6
def main(schema_type: SchemaType, message: str, use_local_db: bool) -> None:
    """Runs the script to autogenerate migrations."""
    database_key = SQLAlchemyDatabaseKey.canonical_for_schema(schema_type)
    if use_local_db:
        # TODO(#4619): We should eventually move this from a local postgres instance to running
        # postgres from a docker container.
        if not local_postgres_helpers.can_start_on_disk_postgresql_database():
            logging.error(
                "pg_ctl is not installed, so the script cannot be run locally. "
                "--project-id must be specified to run against staging or production."
            )
            logging.error("Exiting...")
            sys.exit(1)
        logging.info("Starting local postgres database for autogeneration...")
        tmp_db_dir = local_postgres_helpers.start_on_disk_postgresql_database()
        original_env_vars = (
            local_postgres_helpers.update_local_sqlalchemy_postgres_env_vars())
    else:
        # TODO(Recidiviz/zenhub-tasks#134): This code path will throw when pointed at staging
        # because we haven't created valid read-only users there just yet.
        try:
            original_env_vars = SQLAlchemyEngineManager.update_sqlalchemy_env_vars(
                database_key=database_key, readonly_user=True)
        except ValueError as e:
            logging.warning("Error fetching SQLAlchemy credentials: %s", e)
            logging.warning(
                "Until readonly users are created, we cannot autogenerate migrations against staging."
            )
            logging.warning(
                "See https://github.com/Recidiviz/zenhub-tasks/issues/134")
            sys.exit(1)

    try:
        config = alembic.config.Config(database_key.alembic_file)
        if use_local_db:
            upgrade(config, "head")
        revision(config, autogenerate=True, message=message)
    except Exception as e:
        logging.error("Automigration generation failed: %s", e)

    local_postgres_helpers.restore_local_env_vars(original_env_vars)
    if use_local_db:
        logging.info("Stopping local postgres database...")
        local_postgres_helpers.stop_and_clear_on_disk_postgresql_database(
            tmp_db_dir)
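
How main is invoked is not part of this snippet. A hedged sketch of a typical argparse entry point; the flag names below are illustrative assumptions, not the script's real interface:

if __name__ == "__main__":
    import argparse

    logging.basicConfig(level=logging.INFO)
    parser = argparse.ArgumentParser(
        description="Autogenerate alembic migrations.")
    # Illustrative flags only; the actual script may differ.
    parser.add_argument("--schema-type", required=True,
                        choices=[s.name for s in SchemaType])
    parser.add_argument("--message", required=True,
                        help="Revision message for the generated migration.")
    parser.add_argument("--use-local-db", action="store_true")
    args = parser.parse_args()
    main(SchemaType[args.schema_type], args.message, args.use_local_db)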
Example #7
    def setUp(self) -> None:
        self.db_key = SQLAlchemyDatabaseKey.for_schema(SchemaType.CASE_TRIAGE)
        self.env_vars = (
            local_postgres_helpers.update_local_sqlalchemy_postgres_env_vars())

        # We need to build up the database using the migrations instead of
        # just loading from the internal representation (the
        # metadata.create_all path in Example #2), because the two methods
        # produce different orderings. The migration order is the one seen in
        # staging/prod, as well as what we use in development.
        engine = SQLAlchemyEngineManager.init_engine_for_postgres_instance(
            database_key=self.db_key,
            db_url=local_postgres_helpers.on_disk_postgres_db_url(),
        )
        with runner(
            {
                "file": self.db_key.alembic_file,
                "script_location": self.db_key.migrations_location,
            },
            engine,
        ) as r:
            r.migrate_up_to("head")
Example #8
def _get_old_enum_values(schema_type: SchemaType, enum_name: str) -> List[str]:
    """Fetches the current enum values for the given schema and enum name."""
    # Setup temp pg database
    db_dir = local_postgres_helpers.start_on_disk_postgresql_database()
    database_key = SQLAlchemyDatabaseKey.canonical_for_schema(schema_type)
    overridden_env_vars = (
        local_postgres_helpers.update_local_sqlalchemy_postgres_env_vars())
    engine = create_engine(
        local_postgres_helpers.postgres_db_url_from_env_vars())

    try:
        # Fetch enums
        default_config = {
            "file": database_key.alembic_file,
            "script_location": database_key.migrations_location,
        }
        with runner(default_config, engine) as r:
            r.migrate_up_to("head")
        conn = engine.connect()
        rows = conn.execute(f"""
        SELECT e.enumlabel as enum_value
        FROM pg_type t
            JOIN pg_enum e ON t.oid = e.enumtypid
            JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
        WHERE
            n.nspname = 'public'
            AND t.typname = '{enum_name}';
        """)
        enums = [row[0] for row in rows]
    finally:
        # Teardown temp pg database
        local_postgres_helpers.restore_local_env_vars(overridden_env_vars)
        local_postgres_helpers.stop_and_clear_on_disk_postgresql_database(
            db_dir)

    return enums
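
The f-string interpolation of enum_name into the SQL above is safe only because the input is an internal constant. A bound parameter is the more defensive form; a sketch of the same query using sqlalchemy.text:

        # Drop-in replacement for the conn.execute(...) call above.
        from sqlalchemy import text

        rows = conn.execute(
            text("""
            SELECT e.enumlabel AS enum_value
            FROM pg_type t
                JOIN pg_enum e ON t.oid = e.enumtypid
                JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
            WHERE n.nspname = 'public' AND t.typname = :enum_name;
            """),
            {"enum_name": enum_name},
        )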
Example #9
    def setUp(self) -> None:
        self.test_app = Flask(__name__)
        self.helpers = CaseTriageTestHelpers.from_test(self, self.test_app)
        self.test_client = self.helpers.test_client
        self.mock_segment_client = self.helpers.mock_segment_client

        self.database_key = SQLAlchemyDatabaseKey.for_schema(
            SchemaType.CASE_TRIAGE)
        self.overridden_env_vars = (
            local_postgres_helpers.update_local_sqlalchemy_postgres_env_vars())
        db_url = local_postgres_helpers.postgres_db_url_from_env_vars()
        engine = setup_scoped_sessions(self.test_app, db_url)
        # Auto-generate all tables that exist in our schema in this database
        self.database_key.declarative_meta.metadata.create_all(engine)
        self.session = self.test_app.scoped_session

        # Add seed data
        self.officer = generate_fake_officer("officer_id_1",
                                             "*****@*****.**")
        self.officer_without_clients = generate_fake_officer(
            "officer_id_2", "*****@*****.**")
        self.client_1 = generate_fake_client(
            client_id="client_1",
            supervising_officer_id=self.officer.external_id,
        )
        self.client_2 = generate_fake_client(
            client_id="client_2",
            supervising_officer_id=self.officer.external_id,
            last_assessment_date=date(2021, 2, 2),
        )
        self.client_3 = generate_fake_client(
            client_id="client_3",
            supervising_officer_id=self.officer.external_id,
        )
        self.client_info_3 = generate_fake_client_info(
            client=self.client_3,
            preferred_name="Alex",
        )
        self.case_update_1 = generate_fake_case_update(
            self.client_1,
            self.officer.external_id,
            action_type=CaseUpdateActionType.COMPLETED_ASSESSMENT,
            last_version=serialize_client_case_version(
                CaseUpdateActionType.COMPLETED_ASSESSMENT,
                self.client_1).to_json(),
        )
        self.case_update_2 = generate_fake_case_update(
            self.client_2,
            self.officer_without_clients.external_id,
            action_type=CaseUpdateActionType.COMPLETED_ASSESSMENT,
            last_version=serialize_client_case_version(
                CaseUpdateActionType.COMPLETED_ASSESSMENT,
                self.client_2).to_json(),
        )
        self.case_update_3 = generate_fake_case_update(
            self.client_1,
            self.officer.external_id,
            action_type=CaseUpdateActionType.NOT_ON_CASELOAD,
            last_version=serialize_client_case_version(
                CaseUpdateActionType.NOT_ON_CASELOAD, self.client_1).to_json(),
        )
        self.client_2.most_recent_assessment_date = date(2022, 2, 2)

        self.opportunity_1 = generate_fake_etl_opportunity(
            officer_id=self.officer.external_id,
            person_external_id=self.client_1.person_external_id,
        )
        tomorrow = datetime.now() + timedelta(days=1)
        self.deferral_1 = generate_fake_reminder(self.opportunity_1, tomorrow)
        self.opportunity_2 = generate_fake_etl_opportunity(
            officer_id=self.officer.external_id,
            person_external_id=self.client_2.person_external_id,
        )
        # all generated fake clients have no employer
        self.num_unemployed_opportunities = 3

        self.session.add_all([
            self.officer,
            self.officer_without_clients,
            self.client_1,
            self.client_2,
            self.client_3,
            self.client_info_3,
            self.case_update_1,
            self.case_update_2,
            self.case_update_3,
            self.opportunity_1,
            self.deferral_1,
            self.opportunity_2,
        ])
        self.session.commit()
Example #10
    def setUp(self) -> None:
        self.db_dir = local_postgres_helpers.start_on_disk_postgresql_database()
        self.overridden_env_vars = (
            local_postgres_helpers.update_local_sqlalchemy_postgres_env_vars())
Example #11
def main(database: SchemaType, repo_root: str, ssl_cert_path: str,
         dry_run: bool) -> None:
    """
    Invokes the main code path for running migrations.

    This checks for user validations that the database and branches are correct and then runs existing pending
    migrations.
    """

    requires_ssl = SQLAlchemyEngineManager.database_requires_ssl(
        metadata.project_id())

    if requires_ssl and not ssl_cert_path:
        logging.error(
            'Specifying an argument to --ssl-cert-path is required for the specified database.'
        )
        logging.error('Exiting...')
        sys.exit(1)

    if dry_run:
        if not local_postgres_helpers.can_start_on_disk_postgresql_database():
            logging.error('pg_ctl is not installed. Cannot perform a dry-run.')
            logging.error('Exiting...')
            sys.exit(1)
        logging.info('Creating a dry-run...\n')

    is_prod = metadata.project_id() == GCP_PROJECT_PRODUCTION
    dbname = SQLAlchemyEngineManager.get_stripped_cloudsql_instance_id(
        database)
    if is_prod:
        logging.info('RUNNING AGAINST PRODUCTION\n')

    db_check = input(
        f'Running new migrations on {dbname}. Please type "{dbname}" to confirm. (Anything else exits):\n'
    )
    if db_check != dbname:
        logging.warning('\nConfirmation aborted.')
        sys.exit(1)

    # Get user to validate git branch
    try:
        repo = Repository(repo_root)
    except Exception as e:
        logging.error('improper project root provided: %s', e)
        sys.exit(1)

    current_branch = repo.head.shorthand

    if is_prod and not current_branch.startswith('releases/'):
        logging.error(
            'Migrations run against production must be from a release branch. The current branch is %s.',
            current_branch)
        sys.exit(1)

    branch_check = input(
        f'\nThis script will execute all migrations on this branch ({current_branch}) that have not yet been run '
        f'against {dbname}. Please type "{current_branch}" to confirm your branch. (Anything else exits):\n'
    )
    if branch_check != current_branch:
        logging.warning('\nConfirmation aborted.')
        sys.exit(1)

    # Fetch secrets
    if dry_run:
        overridden_env_vars = (
            local_postgres_helpers.update_local_sqlalchemy_postgres_env_vars())
    else:
        overridden_env_vars = SQLAlchemyEngineManager.update_sqlalchemy_env_vars(
            database, ssl_cert_path=ssl_cert_path)

    # Run migrations
    try:
        if dry_run:
            logging.info(
                'Starting local postgres database for migrations dry run')
            db_dir = local_postgres_helpers.start_on_disk_postgresql_database()
        config = alembic.config.Config(
            SQLAlchemyEngineManager.get_alembic_file(database))
        alembic.command.upgrade(config, 'head')
    except Exception as e:
        logging.error('Migrations failed to run: %s', e)
        sys.exit(1)
    finally:
        local_postgres_helpers.restore_local_env_vars(overridden_env_vars)
        if dry_run:
            try:
                logging.info('Stopping local postgres database')
                local_postgres_helpers.stop_and_clear_on_disk_postgresql_database(
                    db_dir)
            except Exception as e2:
                logging.error('Error cleaning up postgres: %s', e2)
Example #12
def main(
    database: SchemaType, repo_root: str, ssl_cert_path: str, dry_run: bool
) -> None:
    """
    Invokes the main code path for running migrations.

    This checks for user validations that the database and branches are correct and then runs existing pending
    migrations.
    """
    if dry_run:
        if not local_postgres_helpers.can_start_on_disk_postgresql_database():
            logging.error("pg_ctl is not installed. Cannot perform a dry-run.")
            logging.error("Exiting...")
            sys.exit(1)
        logging.info("Creating a dry-run...\n")
    else:
        if not ssl_cert_path:
            logging.error(
                "SSL certificates are required when running against live databases"
            )
            logging.error("Exiting...")
            sys.exit(1)
        logging.info("Using SSL certificate path: %s", ssl_cert_path)

    is_prod = metadata.project_id() == GCP_PROJECT_PRODUCTION
    if is_prod:
        logging.info("RUNNING AGAINST PRODUCTION\n")

    confirm_correct_db(database)
    confirm_correct_git_branch(repo_root, is_prod=is_prod)

    if dry_run:
        overridden_env_vars = (
            local_postgres_helpers.update_local_sqlalchemy_postgres_env_vars()
        )
    else:
        overridden_env_vars = SQLAlchemyEngineManager.update_sqlalchemy_env_vars(
            database,
            ssl_cert_path=ssl_cert_path,
            migration_user=True,
        )

    # Run migrations
    try:
        if dry_run:
            logging.info("Starting local postgres database for migrations dry run")
            db_dir = local_postgres_helpers.start_on_disk_postgresql_database()
        config = alembic.config.Config(
            SQLAlchemyEngineManager.get_alembic_file(database)
        )
        alembic.command.upgrade(config, "head")
    except Exception as e:
        logging.error("Migrations failed to run: %s", e)
        sys.exit(1)
    finally:
        local_postgres_helpers.restore_local_env_vars(overridden_env_vars)
        if dry_run:
            try:
                logging.info("Stopping local postgres database")
                local_postgres_helpers.stop_and_clear_on_disk_postgresql_database(
                    db_dir
                )
            except Exception as e2:
                logging.error("Error cleaning up postgres: %s", e2)
Example #13
    def setUp(self) -> None:
        self.test_app = Flask(__name__)
        self.test_app.register_blueprint(api)
        self.test_client = self.test_app.test_client()

        @self.test_app.errorhandler(FlaskException)
        def _handle_auth_error(ex: FlaskException) -> Response:
            response = jsonify({
                "code": ex.code,
                "description": ex.description,
            })
            response.status_code = ex.status_code
            return response

        self.overridden_env_vars = (
            local_postgres_helpers.update_local_sqlalchemy_postgres_env_vars())
        db_url = local_postgres_helpers.postgres_db_url_from_env_vars()
        setup_scoped_sessions(self.test_app, db_url)

        # Add seed data
        self.officer_1 = generate_fake_officer("officer_id_1")
        self.officer_2 = generate_fake_officer("officer_id_2")
        self.client_1 = generate_fake_client(
            client_id="client_1",
            supervising_officer_id=self.officer_1.external_id,
        )
        self.client_2 = generate_fake_client(
            client_id="client_2",
            supervising_officer_id=self.officer_1.external_id,
            last_assessment_date=date(2021, 2, 2),
        )
        self.case_update_1 = CaseUpdate(
            person_external_id=self.client_1.person_external_id,
            officer_external_id=self.client_1.supervising_officer_external_id,
            state_code=self.client_1.state_code,
            update_metadata={
                "actions":
                CaseUpdatesInterface.serialize_actions(
                    self.client_1,
                    [CaseUpdateActionType.COMPLETED_ASSESSMENT],
                ),
            },
        )
        self.case_update_2 = CaseUpdate(
            person_external_id=self.client_2.person_external_id,
            officer_external_id=self.client_2.supervising_officer_external_id,
            state_code=self.client_2.state_code,
            update_metadata={
                "actions":
                CaseUpdatesInterface.serialize_actions(
                    self.client_2,
                    [CaseUpdateActionType.COMPLETED_ASSESSMENT],
                ),
            },
        )
        self.client_2.most_recent_assessment_date = date(2022, 2, 2)

        sess = SessionFactory.for_schema_base(CaseTriageBase)
        sess.add(self.officer_1)
        sess.add(self.client_1)
        sess.add(self.client_2)
        sess.add(self.case_update_1)
        sess.add(self.case_update_2)
        sess.commit()