def upgrade(self):
    """Upgrade the storage schema to the latest migration version."""
    # NOTE(gordc): to minimise memory, only import migration when needed
    from oslo_db.sqlalchemy import migration

    base_dir = os.path.abspath(os.path.dirname(__file__))
    repo_path = os.path.join(base_dir, '..', '..', 'storage',
                             'sqlalchemy', 'migrate_repo')
    migration.db_sync(self._engine_facade.get_engine(), repo_path)
def _sync_common_repo(version):
    """Sync the common migration repository up to ``version``."""
    repo_path = find_migrate_repo()
    engine = sql.get_engine()
    # Downgrades are unsupported; fail fast before touching the schema.
    _assert_not_schema_downgrade(version=version)
    migration.db_sync(engine, repo_path, version=version,
                      init_version=migrate_repo.DB_INIT_VERSION,
                      sanity_check=False)
def test_extension_federation_upgraded_values(self):
    """remote_id values survive the move to the idp_remote_ids table."""
    abs_path = migration_helpers.find_migrate_repo(federation)
    migration.db_version_control(sql.get_engine(), abs_path)
    # Stop at version 6, before remote_id is split into its own table.
    migration.db_sync(sql.get_engine(), abs_path, version=6)
    idp_table = sqlalchemy.Table("identity_provider", self.metadata,
                                 autoload=True)
    idps = [{'id': uuid.uuid4().hex,
             'enabled': True,
             'description': uuid.uuid4().hex,
             'remote_id': uuid.uuid4().hex} for _ in range(2)]
    for idp in idps:
        insert = idp_table.insert().values(
            {'id': idp['id'],
             'enabled': idp['enabled'],
             'description': idp['description'],
             'remote_id': idp['remote_id']})
        self.engine.execute(insert)
    # Run the remaining migrations, which relocate the remote ids.
    migration.db_sync(sql.get_engine(), abs_path)
    remote_table = sqlalchemy.Table("idp_remote_ids", self.metadata,
                                    autoload=True)
    for idp in idps:
        query = remote_table.select().where(
            remote_table.c.idp_id == idp['id'])
        remote = self.engine.execute(query).fetchone()
        self.assertEqual(idp['remote_id'], remote['remote_id'],
                         'remote_ids must be preserved during the '
                         'migration from identity_provider table to '
                         'idp_remote_ids table')
def _sync_extension_repo(extension, version):
    """Sync a contrib extension's migration repository to ``version``.

    Raises MigrationMovedFailure for extensions whose migrations were
    folded into the common repo.
    """
    if extension in MIGRATED_EXTENSIONS:
        raise exception.MigrationMovedFailure(extension=extension)
    init_version = 0
    engine = sql.get_engine()
    try:
        pkg_name = '.'.join((contrib.__name__, extension))
        package = importutils.import_module(pkg_name)
    except ImportError:
        raise ImportError(_("%s extension does not exist.") % pkg_name)
    try:
        repo_path = find_migrate_repo(package)
        try:
            # Register the repo with the version control API; an
            # already-controlled database raises an error we can
            # safely ignore.
            migration.db_version_control(sql.get_engine(), repo_path)
        except exceptions.DatabaseAlreadyControlledError:  # nosec
            pass
    except exception.MigrationNotProvided as exc:
        print(exc)
        sys.exit(1)
    _assert_not_schema_downgrade(extension=extension, version=version)
    migration.db_sync(engine, repo_path, version=version,
                      init_version=init_version, sanity_check=False)
def _sync_common_repo(version):
    """Sync the legacy common migration repo to ``version``."""
    repo_path = find_repo(LEGACY_REPO)
    start_version = get_init_version()
    with sql.session_for_write() as session:
        bind = session.get_bind()
        # Moving the schema backwards is not supported; bail out early.
        _assert_not_schema_downgrade(version=version)
        migration.db_sync(bind, repo_path, version=version,
                          init_version=start_version, sanity_check=False)
def _sync_common_repo(version):
    """Sync the common repo, un-wedging MySQL installs hit by bug #1334779."""
    repo_path = find_migrate_repo()
    start_version = migrate_repo.DB_INIT_VERSION
    engine = sql.get_engine()
    try:
        migration.db_sync(engine, repo_path, version=version,
                          init_version=start_version)
    except ValueError:
        # NOTE(morganfainberg): ValueError is raised from the sanity check (
        # verifies that tables are utf8 under mysql). The region table was not
        # initially built with InnoDB and utf8 as part of the table arguments
        # when the migration was initially created. Bug #1334779 is a scenario
        # where the deployer can get wedged, unable to upgrade or downgrade.
        # This is a workaround to "fix" that table if we're under MySQL.
        if engine.name == 'mysql' and six.text_type(get_db_version()) == '37':
            _fix_migration_37(engine)
            # Try the migration a second time now that we've done the
            # un-wedge work.
            migration.db_sync(engine, repo_path, version=version,
                              init_version=start_version)
        else:
            raise
def _sync_extension_repo(extension, version):
    """Sync an extension's migration repository to ``version``.

    :param extension: name of the extension package under ``contrib``
    :param version: target schema version; downgrades are rejected
    """
    init_version = 0
    engine = sql.get_engine()
    try:
        # BUG FIX: the previous code did '.'.join((extension)) — a
        # parenthesized string, not a tuple — which joined the
        # *characters* of the extension name ('fed' -> 'f.e.d').
        # The repo lives at contrib.<extension>, as in the sibling
        # implementations of this helper.
        package_name = '.'.join((contrib.__name__, extension))
        package = importutils.import_module(package_name)
    except ImportError:
        raise ImportError(_("%s extension does not exist.") % package_name)
    try:
        abs_path = find_migrate_repo(package)
        try:
            # Register the repo with the version control API.
            # If it already knows about the repo, it will throw
            # an exception that we can safely ignore.
            migration.db_version_control(sql.get_engine(), abs_path)
        except exceptions.DatabaseAlreadyControlledError:
            pass
    except exception.MigrationNotProvided as e:
        print(e)
        sys.exit(1)
    _assert_not_schema_downgrade(extension=extension, version=version)
    migration.db_sync(engine, abs_path, version=version,
                      init_version=init_version, sanity_check=False)
def _sync_extension_repo(extension, version):
    """Sync a contrib extension's migration repo to ``version``."""
    if extension in MIGRATED_EXTENSIONS:
        raise exception.MigrationMovedFailure(extension=extension)
    with sql.session_for_write() as session:
        engine = session.get_bind()
        try:
            pkg_name = '.'.join((contrib.__name__, extension))
            package = importutils.import_module(pkg_name)
        except ImportError:
            raise ImportError(_("%s extension does not exist.") % pkg_name)
        try:
            repo_path = find_migrate_repo(package)
            try:
                # Register the repo with the version control API; if the
                # database is already controlled the error below tells us
                # so and is harmless.
                migration.db_version_control(engine, repo_path)
            except exceptions.DatabaseAlreadyControlledError:  # nosec
                pass
        except exception.MigrationNotProvided as exc:
            print(exc)
            sys.exit(1)
        _assert_not_schema_downgrade(extension=extension, version=version)
        start_version = get_init_version(abs_path=repo_path)
        migration.db_sync(engine, repo_path, version=version,
                          init_version=start_version, sanity_check=False)
def exec_db_sync(version=None, init_version=INIT_VERSION, engine=None):
    """Run db_sync on the migrate repo, defaulting to the app engine."""
    target_engine = get_engine() if engine is None else engine
    db_sync(engine=target_engine,
            abs_path=MIGRATE_REPO_PATH,
            version=version,
            init_version=init_version)
def _sync_common_repo(version):
    """Migrate the common repository schema to ``version``."""
    repo_path = find_migrate_repo()
    start_version = migrate_repo.DB_INIT_VERSION
    migration.db_sync(sql.get_engine(), repo_path, version=version,
                      init_version=start_version)
def sync(self, version=None, current_version=None):
    """Sync the database schema to ``version``.

    When ``current_version`` is given the database is first placed under
    migration control at that version.
    """
    # The CLI may hand us the literal string "None" for an unset value.
    if current_version not in (None, "None"):
        migration.db_version_control(db_api.get_engine(),
                                     db_migration.MIGRATE_REPO_PATH,
                                     version=current_version)
    migration.db_sync(db_api.get_engine(),
                      db_migration.MIGRATE_REPO_PATH,
                      version)
def downgrade(self, version=None):
    """Downgrade the database's migration level"""
    # Deprecated operation: warn loudly on stderr before proceeding.
    warning = ("Warning: DB downgrade is deprecated and will be removed in N "
               "release. Users should make a full database backup of the "
               "production data before attempting any upgrade.")
    print(warning, file=sys.stderr)
    migration.db_sync(db_api.get_engine(),
                      db_migration.MIGRATE_REPO_PATH,
                      version)
def downgrade(self, version=None):
    """Downgrade the database's migration level"""
    deprecation_msg = (
        "Warning: DB downgrade is deprecated and will be removed in N "
        "release. Users should make a full database backup of the "
        "production data before attempting any upgrade.")
    # Deprecated path — tell the operator on stderr, then run the sync.
    print(deprecation_msg, file=sys.stderr)
    engine = db_api.get_engine()
    migration.db_sync(engine, db_migration.MIGRATE_REPO_PATH, version)
def test_extension_migrated(self):
    """Migrating an extension leaves its reported version above 0."""
    for name, extension in six.iteritems(EXTENSIONS):
        repo_path = migration_helpers.find_migrate_repo(extension)
        migration.db_version_control(sql.get_engine(), repo_path)
        migration.db_sync(sql.get_engine(), repo_path)
        version = migration_helpers.get_db_version(extension=name)
        self.assertTrue(
            version > 0,
            "Version for %s didn't change after migrated?" % name)
def test_db_sync_sanity_called(self):
    """db_sync runs the schema sanity check by default."""
    patches = test_utils.nested(
        mock.patch.object(migration, '_find_migrate_repo'),
        mock.patch.object(migration, '_db_schema_sanity_check'),
        mock.patch.object(versioning_api, 'downgrade'))
    with patches as (mock_find_repo, mock_sanity, mock_downgrade):
        mock_find_repo.return_value = self.return_value
        migration.db_sync(self.engine, self.path, self.test_version)
        mock_sanity.assert_called_once_with(self.engine)
def sync(self, version=None, current_version=None):
    """
    Place a database under migration control if ``current_version`` is
    supplied, then upgrade/downgrade it to ``version``, creating the
    schema first if necessary.
    """
    if current_version not in (None, 'None'):
        migration.db_version_control(db_api.get_engine(),
                                     db_migration.MIGRATE_REPO_PATH,
                                     version=current_version)
    migration.db_sync(db_api.get_engine(),
                      db_migration.MIGRATE_REPO_PATH,
                      version)
def test_extension_migrated(self):
    """Migrated extensions report a non-zero version and refuse downgrade."""
    for name, extension in six.iteritems(EXTENSIONS):
        repo_path = migration_helpers.find_migrate_repo(extension)
        migration.db_version_control(sql.get_engine(), repo_path)
        migration.db_sync(sql.get_engine(), repo_path)
        version = migration_helpers.get_db_version(extension=name)
        self.assertTrue(
            version > 0,
            "Version for %s didn't change after migrated?" % name)
        # Verify downgrades cannot occur
        self.assertRaises(
            db_exception.DbMigrationError,
            migration_helpers._sync_extension_repo,
            extension=name,
            version=0)
def test_db_sync_sanity_skipped(self):
    """sanity_check=False suppresses the schema sanity check."""
    patches = test_utils.nested(
        mock.patch.object(migration, '_find_migrate_repo'),
        mock.patch.object(migration, '_db_schema_sanity_check'),
        mock.patch.object(versioning_api, 'downgrade'))
    with patches as (mock_find_repo, mock_sanity, mock_downgrade):
        mock_find_repo.return_value = self.return_value
        migration.db_sync(self.engine, self.path, self.test_version,
                          sanity_check=False)
        self.assertFalse(mock_sanity.called)
def test_db_sync_sanity_called(self):
    """A default db_sync triggers the schema sanity check twice."""
    patches = test_utils.nested(
        mock.patch.object(migration, '_find_migrate_repo'),
        mock.patch.object(migration, '_db_schema_sanity_check'),
        mock.patch.object(versioning_api, 'downgrade'))
    with patches as (mock_find_repo, mock_sanity, mock_downgrade):
        mock_find_repo.return_value = self.return_value
        migration.db_sync(self.engine, self.path, self.test_version)
        expected_calls = [mock.call(self.engine), mock.call(self.engine)]
        self.assertEqual(expected_calls, mock_sanity.call_args_list)
def _sync_repo(repo_name):
    """Place ``repo_name`` under version control and sync it to latest."""
    repo_path = find_repo(repo_name)
    with sql.session_for_write() as session:
        bind = session.get_bind()
        try:
            # Register the repo with the version control API. If the
            # database is already controlled, the error is harmless and
            # can be ignored.
            migration.db_version_control(bind, repo_path)
        except (migration.exception.DbMigrationError,
                exceptions.DatabaseAlreadyControlledError):  # nosec
            pass
        start_version = get_init_version(abs_path=repo_path)
        migration.db_sync(bind, repo_path, init_version=start_version,
                          sanity_check=False)
def test_db_sync_downgrade(self):
    """db_sync downgrades when the DB is ahead of the requested version."""
    patches = test_utils.nested(
        mock.patch.object(migration, '_find_migrate_repo'),
        mock.patch.object(versioning_api, 'downgrade'))
    with patches as (mock_find_repo, mock_downgrade):
        mock_find_repo.return_value = self.return_value
        # Pretend the database is one version ahead of the target.
        self.mock_api_db_version.return_value = self.test_version + 1
        migration.db_sync(self.engine, self.path, self.test_version)
        mock_downgrade.assert_called_once_with(
            self.engine, self.return_value, self.test_version)
def _sync_extension_repo(extension, version):
    """Sync a contrib extension's migration repo to ``version``.

    Includes a MySQL-specific retry to repair federation tables that were
    created without InnoDB/utf8 (bug #1426334).
    """
    init_version = 0
    engine = sql.get_engine()
    try:
        # The extension's migrate repo lives in the contrib.<extension>
        # package; resolve and import it.
        package_name = '.'.join((contrib.__name__, extension))
        package = importutils.import_module(package_name)
    except ImportError:
        raise ImportError(_("%s extension does not exist.") % package_name)
    try:
        abs_path = find_migrate_repo(package)
        try:
            migration.db_version_control(sql.get_engine(), abs_path)
        # Register the repo with the version control API
        # If it already knows about the repo, it will throw
        # an exception that we can safely ignore
        except exceptions.DatabaseAlreadyControlledError:
            pass
    except exception.MigrationNotProvided as e:
        print(e)
        sys.exit(1)
    # Downgrades are unsupported; abort before running any migration.
    _assert_not_schema_downgrade(extension=extension, version=version)
    try:
        migration.db_sync(engine, abs_path, version=version,
                          init_version=init_version)
    except ValueError:
        # NOTE(marco-fargetta): ValueError is raised from the sanity check (
        # verifies that tables are utf8 under mysql). The federation_protocol,
        # identity_provider and mapping tables were not initially built with
        # InnoDB and utf8 as part of the table arguments when the migration
        # was initially created. Bug #1426334 is a scenario where the deployer
        # can get wedged, unable to upgrade or downgrade.
        # This is a workaround to "fix" those tables if we're under MySQL and
        # the version is before the 6 because before the tables were introduced
        # before and patched when migration 5 was available
        if engine.name == 'mysql' and \
           int(six.text_type(get_db_version(extension))) < 6:
            _fix_federation_tables(engine)
            # The migration is applied again after the fix
            migration.db_sync(engine, abs_path, version=version,
                              init_version=init_version)
        else:
            raise
def _setup_database(extensions=None):
    """(Re)create the test database, caching a pristine migrated copy."""
    if CONF.database.connection != tests.IN_MEM_DB_CONN_STRING:
        db = tests.dirs.tmp('test.db')
        pristine = tests.dirs.tmp('test.db.pristine')
        if os.path.exists(db):
            os.unlink(db)
        if os.path.exists(pristine):
            # Fast path: restore the previously migrated database.
            shutil.copyfile(pristine, db)
        else:
            # Slow path: run every migration once, then keep a pristine
            # copy so later calls can just restore it.
            migration.db_sync(sql.get_engine(),
                              migration_helpers.find_migrate_repo())
            for extension in (extensions or []):
                migration_helpers.sync_database_to_version(
                    extension=extension)
            shutil.copyfile(db, pristine)
def _sync_repo(repo_name):
    """Version-control (if needed) and sync the named migration repo."""
    abs_path = find_repo(repo_name)
    with sql.session_for_write() as session:
        engine = session.get_bind()
        try:
            migration.db_version_control(engine, abs_path)
        except (migration.exception.DBMigrationError,
                exceptions.DatabaseAlreadyControlledError):  # nosec
            # The repo is already registered with the version control
            # API — perfectly fine, carry on.
            pass
        migration.db_sync(engine, abs_path,
                          init_version=get_init_version(abs_path=abs_path),
                          sanity_check=False)
def test_extension_migrated(self):
    """After migration each extension reports version > 0 and rejects 0."""
    for name, extension in EXTENSIONS.items():
        repo_path = migration_helpers.find_migrate_repo(extension)
        migration.db_version_control(sql.get_engine(), repo_path)
        migration.db_sync(sql.get_engine(), repo_path)
        version = migration_helpers.get_db_version(extension=name)
        self.assertTrue(
            version > 0,
            "Version for %s didn't change after migrated?" % name)
        # Verify downgrades cannot occur
        self.assertRaises(db_exception.DbMigrationError,
                          migration_helpers._sync_extension_repo,
                          extension=name,
                          version=0)
def db_sync(engine, version=None):
    """Sync the schema to ``version`` (latest when None)."""
    base_dir = os.path.abspath(os.path.dirname(__file__))
    repo_path = os.path.join(base_dir, 'migrate_repo')
    return oslo_migration.db_sync(engine, repo_path, version,
                                  init_version=INIT_VERSION)
def db_sync(self, version=None, init_version=INIT_VERSION, engine=None):
    """Migrate the database to `version` or the most recent version.

    :param version: target schema version, or None for the latest
    :param init_version: version the migrate repo starts from
    :param engine: optional engine to migrate; defaults to the cached
        instance engine. (BUG FIX: this parameter was previously
        accepted but silently ignored — `self._engine` was always used.)
    :returns: result of the underlying migration.db_sync call
    """
    if engine is None:
        # Lazily create and cache the default engine.
        if not self._engine:
            self._engine = self.get_engine()
        engine = self._engine
    return migration.db_sync(engine=engine,
                             abs_path=MIGRATE_REPO_PATH,
                             version=version,
                             init_version=init_version)
def upgrade(self):
    """Bring the database schema up to the latest revision.

    A fresh (uncontrolled) database is built directly from the models and
    stamped at the repo's newest version; an existing one is migrated
    with sqlalchemy-migrate scripts.
    """
    # NOTE(gordc): to minimise memory, only import migration when needed
    from oslo_db.sqlalchemy import migration
    path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                        'sqlalchemy', 'migrate_repo')
    engine = self._engine_facade.get_engine()
    from migrate import exceptions as migrate_exc
    from migrate.versioning import api
    from migrate.versioning import repository
    repo = repository.Repository(path)
    try:
        # Probe whether the database is under version control.
        api.db_version(engine, repo)
    except migrate_exc.DatabaseNotControlledError:
        # Fresh database: create the full schema from the models, then
        # stamp it at the latest repo version so future syncs are no-ops.
        models.Base.metadata.create_all(engine)
        api.version_control(engine, repo, repo.latest)
    else:
        # Existing database: apply any pending migration scripts.
        migration.db_sync(engine, path)
def db_sync(version=None, engine=None):
    """Migrate the database to `version` or the most recent version."""
    target_engine = db_api.get_engine() if engine is None else engine
    return migration.db_sync(engine=target_engine,
                             abs_path=MIGRATE_REPO_PATH,
                             version=version,
                             init_version=INIT_VERSION)
def _sync_extension_repo(extension, version):
    """Import the contrib extension package and sync its migrate repo."""
    init_version = 0
    try:
        pkg_name = '.'.join((contrib.__name__, extension))
        package = importutils.import_module(pkg_name)
    except ImportError:
        raise ImportError(_("%s extension does not exist.") % pkg_name)
    try:
        repo_path = find_migrate_repo(package)
        try:
            # Register the repo; a database that is already controlled
            # raises an error we can safely swallow.
            migration.db_version_control(sql.get_engine(), repo_path)
        except exceptions.DatabaseAlreadyControlledError:
            pass
    except exception.MigrationNotProvided as exc:
        print(exc)
        sys.exit(1)
    migration.db_sync(sql.get_engine(), repo_path, version=version,
                      init_version=init_version)
def downgrade(self, version):
    """Downgrade the schema to ``version`` via the migrate plugin."""
    try:
        # sqlalchemy-migrate needs an integer target; 'base' and None
        # both mean the repository's initial version.
        target = self.init_version if version in ('base', None) else version
        target = int(target)
        return migration.db_sync(self.engine, self.repository, target,
                                 init_version=self.init_version)
    except ValueError:
        LOG.error('Migration number for migrate plugin must be valid '
                  'integer or empty, if you want to downgrade '
                  'to initial state')
        raise
def downgrade(self, version):
    """Downgrade the schema to ``version`` using the migrate plugin."""
    try:
        # Map 'base'/None to the repo's initial version, then coerce to
        # int — migrate only accepts integer targets.
        target = version
        if target in ('base', None):
            target = self.init_version
        return migration.db_sync(self.engine, self.repository, int(target),
                                 init_version=self.init_version)
    except ValueError:
        LOG.error(
            _LE('Migration number for migrate plugin must be valid '
                'integer or empty, if you want to downgrade '
                'to initial state')
        )
        raise
def sync_repo(version):
    """Sync the migrate repository to ``version``."""
    repo_path = find_migrate_repo()
    start_version = migrate_repo.DB_INIT_VERSION
    engine = core.get_engine()
    migration.db_sync(engine, repo_path, version, start_version)
def upgrade(self, version):
    """Upgrade to ``version`` ('head' and None both mean the latest)."""
    target = None if version == 'head' else version
    return migration.db_sync(self.engine, self.repository, target,
                             init_version=self.init_version)
def downgrade(self, version=None):
    """Downgrade the database's migration level"""
    engine = db_api.get_engine()
    migration.db_sync(engine, db_migration.MIGRATE_REPO_PATH, version)
def db_sync(engine, version=None):
    """Migrate the schema to ``version``, or to the newest revision."""
    here = os.path.abspath(os.path.dirname(__file__))
    return oslo_migration.db_sync(engine,
                                  os.path.join(here, "migrate_repo"),
                                  version,
                                  init_version=INIT_VERSION)