def test_extension_federation_upgraded_values(self):
    """Remote IDs are carried over into the new idp_remote_ids table."""
    repo_path = migration_helpers.find_migrate_repo(federation)
    migration.db_version_control(sql.get_engine(), repo_path)
    # Stop at version 6, while remote_id still lives on identity_provider.
    migration.db_sync(sql.get_engine(), repo_path, version=6)
    idp_table = sqlalchemy.Table("identity_provider",
                                 self.metadata,
                                 autoload=True)
    providers = []
    for _ in range(2):
        providers.append({'id': uuid.uuid4().hex,
                          'enabled': True,
                          'description': uuid.uuid4().hex,
                          'remote_id': uuid.uuid4().hex})
    for provider in providers:
        stmt = idp_table.insert().values(
            {'id': provider['id'],
             'enabled': provider['enabled'],
             'description': provider['description'],
             'remote_id': provider['remote_id']})
        self.engine.execute(stmt)
    # Now run the remaining migrations, which move remote_id out.
    migration.db_sync(sql.get_engine(), repo_path)
    remote_ids_table = sqlalchemy.Table("idp_remote_ids",
                                        self.metadata,
                                        autoload=True)
    for provider in providers:
        query = remote_ids_table.select().where(
            remote_ids_table.c.idp_id == provider['id'])
        row = self.engine.execute(query).fetchone()
        self.assertEqual(provider['remote_id'], row['remote_id'],
                         'remote_ids must be preserved during the '
                         'migration from identity_provider table to '
                         'idp_remote_ids table')
def sync_database_to_version(extension=None, version=None):
    """Upgrade the common repo, or a named extension repo, to ``version``.

    With no extension the common keystone migration repo is synced from
    its DB_INIT_VERSION; otherwise the extension package is imported,
    placed under version control if needed, and synced from version 0.
    """
    if not extension:
        abs_path = find_migrate_repo()
        init_version = migrate_repo.DB_INIT_VERSION
    else:
        init_version = 0
        try:
            package_name = '.'.join((contrib.__name__, extension))
            package = importutils.import_module(package_name)
        except ImportError:
            raise ImportError(_("%s extension does not exist.")
                              % package_name)
        try:
            abs_path = find_migrate_repo(package)
            try:
                # Register the repo with the version control API. If it is
                # already under version control the resulting error is
                # harmless and ignored.
                migration.db_version_control(sql.get_engine(), abs_path)
            except exceptions.DatabaseAlreadyControlledError:
                pass
        except exception.MigrationNotProvided as e:
            print(e)
            sys.exit(1)
    migration.db_sync(sql.get_engine(), abs_path, version=version,
                      init_version=init_version)
def _sync_extension_repo(extension, version):
    """Upgrade one extension's migration repo to ``version``.

    :param extension: name of the contrib extension package
    :param version: target schema version, or None for the latest
    :raises keystone.exception.MigrationMovedFailure: if the extension's
        migrations have been folded into the common repo
    :raises ImportError: if no such extension package exists
    """
    if extension in MIGRATED_EXTENSIONS:
        raise exception.MigrationMovedFailure(extension=extension)

    init_version = 0
    engine = sql.get_engine()

    try:
        package_name = '.'.join((contrib.__name__, extension))
        package = importutils.import_module(package_name)
    except ImportError:
        raise ImportError(_("%s extension does not exist.") % package_name)
    try:
        abs_path = find_migrate_repo(package)
        try:
            # Register the repo with the version control API. Reuse the
            # engine bound above instead of fetching it a second time.
            migration.db_version_control(engine, abs_path)
        # If it already knows about the repo, it will throw
        # an exception that we can safely ignore.
        except exceptions.DatabaseAlreadyControlledError:  # nosec
            pass
    except exception.MigrationNotProvided as e:
        print(e)
        sys.exit(1)

    _assert_not_schema_downgrade(extension=extension, version=version)

    # sanity_check is disabled: the utf8 check is handled elsewhere.
    migration.db_sync(engine, abs_path, version=version,
                      init_version=init_version, sanity_check=False)
def setup_database(self):
    """Place the OAuth1 extension repo under migrate control and sync it."""
    super(OAuth1Tests, self).setup_database()
    package_name = '.'.join((contrib.__name__, self.EXTENSION_NAME))
    extension_package = importutils.import_module(package_name)
    repo_path = migration_helpers.find_migrate_repo(extension_package)
    migration.db_version_control(sql.get_engine(), repo_path)
    migration.db_sync(sql.get_engine(), repo_path)
def test_extension_migrated(self):
    """When get the version after migrating an extension, it's not 0."""
    for name, extension in six.iteritems(EXTENSIONS):
        repo_path = migration_helpers.find_migrate_repo(extension)
        migration.db_version_control(sql.get_engine(), repo_path)
        migration.db_sync(sql.get_engine(), repo_path)
        current = migration_helpers.get_db_version(extension=name)
        failure_msg = "Version for %s didn't change after migrated?" % name
        self.assertTrue(current > 0, failure_msg)
def get_db_version(extension=None):
    """Return the current migrate version of the common or extension repo.

    :param extension: optional contrib extension name; when omitted the
        common repo's version is reported.
    :raises ImportError: if the named extension package does not exist.
    """
    if extension:
        package_name = ".".join((contrib.__name__, extension))
        try:
            package = importutils.import_module(package_name)
        except ImportError:
            raise ImportError(_("%s extension does not exist.")
                              % package_name)
        return migration.db_version(
            sql.get_engine(), find_migrate_repo(package), 0)

    return migration.db_version(
        sql.get_engine(), find_migrate_repo(), migrate_repo.DB_INIT_VERSION)
def get_db_version(extension=None):
    """Return the migrate version of the common or named extension repo.

    Both repos report from an initial version of 0 in this variant.
    :raises ImportError: if the named extension package does not exist.
    """
    if extension:
        package_name = '.'.join((contrib.__name__, extension))
        try:
            package = importutils.import_module(package_name)
        except ImportError:
            raise ImportError(_("%s extension does not exist.")
                              % package_name)
        return migration.db_version(
            sql.get_engine(), find_migrate_repo(package), 0)

    return migration.db_version(sql.get_engine(), find_migrate_repo(), 0)
def test_extension_migrated(self):
    """When get the version after migrating an extension, it's not 0."""
    for name, extension in six.iteritems(EXTENSIONS):
        repo_path = migration_helpers.find_migrate_repo(extension)
        migration.db_version_control(sql.get_engine(), repo_path)
        migration.db_sync(sql.get_engine(), repo_path)
        current = migration_helpers.get_db_version(extension=name)
        self.assertTrue(current > 0,
                        "Version for %s didn't change after migrated?" % name)
        # Verify downgrades cannot occur
        self.assertRaises(db_exception.DbMigrationError,
                          migration_helpers._sync_extension_repo,
                          extension=name,
                          version=0)
def _sync_extension_repo(extension, version):
    """Upgrade the named extension's migration repo to ``version``.

    Imports the contrib extension package, registers its repo with the
    migrate version-control API (ignoring "already controlled"), refuses
    downgrades, then syncs — retrying once after repairing the federation
    tables if the mysql utf8 sanity check trips (bug #1426334).

    :raises ImportError: if the extension package does not exist
    """
    init_version = 0
    engine = sql.get_engine()
    try:
        package_name = '.'.join((contrib.__name__, extension))
        package = importutils.import_module(package_name)
    except ImportError:
        raise ImportError(_("%s extension does not exist.") % package_name)
    try:
        abs_path = find_migrate_repo(package)
        try:
            migration.db_version_control(sql.get_engine(), abs_path)
        # Register the repo with the version control API.
        # If it already knows about the repo, it will throw
        # an exception that we can safely ignore.
        except exceptions.DatabaseAlreadyControlledError:
            pass
    except exception.MigrationNotProvided as e:
        print(e)
        sys.exit(1)
    _assert_not_schema_downgrade(extension=extension, version=version)
    try:
        migration.db_sync(engine, abs_path, version=version,
                          init_version=init_version)
    except ValueError:
        # NOTE(marco-fargetta): ValueError is raised from the sanity check
        # (which verifies that tables are utf8 under mysql). The
        # federation_protocol, identity_provider and mapping tables were
        # not initially built with InnoDB and utf8 as part of the table
        # arguments when the migration was initially created. Bug #1426334
        # is a scenario where the deployer can get wedged, unable to
        # upgrade or downgrade. This is a workaround to "fix" those tables
        # if we're under MySQL and the current version is below 6: the
        # tables were introduced earlier and were only patched once
        # migration 5 became available.
        if engine.name == 'mysql' and \
           int(six.text_type(get_db_version(extension))) < 6:
            _fix_federation_tables(engine)
            # The migration is applied again after the fix.
            migration.db_sync(engine, abs_path, version=version,
                              init_version=init_version)
        else:
            raise
def test_extension_migrated(self):
    """When get the version after migrating an extension, it's not 0."""
    for name, extension in EXTENSIONS.items():
        repo_path = migration_helpers.find_migrate_repo(extension)
        migration.db_version_control(sql.get_engine(), repo_path)
        migration.db_sync(sql.get_engine(), repo_path)
        current = migration_helpers.get_db_version(extension=name)
        failure_msg = "Version for %s didn't change after migrated?" % name
        self.assertTrue(current > 0, failure_msg)
        # Verify downgrades cannot occur
        self.assertRaises(db_exception.DbMigrationError,
                          migration_helpers._sync_extension_repo,
                          extension=name,
                          version=0)
def _sync_common_repo(version):
    """Upgrade the common migration repo to ``version``.

    If the mysql utf8 sanity check fails at migration 37 (bug #1334779,
    region table built without InnoDB/utf8), repair the table and retry.
    """
    repo_path = find_migrate_repo()
    base_version = migrate_repo.DB_INIT_VERSION
    engine = sql.get_engine()

    def _do_sync():
        migration.db_sync(engine, repo_path, version=version,
                          init_version=base_version)

    try:
        _do_sync()
    except ValueError:
        # NOTE(morganfainberg): ValueError is raised from the sanity check
        # (verifies that tables are utf8 under mysql). A deployer can get
        # wedged at migration 37; anything else is a genuine failure.
        if engine.name != 'mysql' or six.text_type(get_db_version()) != '37':
            raise
        _fix_migration_37(engine)
        # Try the migration a second time now that we've done the
        # un-wedge work.
        _do_sync()
def test_extension_initial(self):
    """When get the initial version of an extension, it's 0."""
    for name, extension in six.iteritems(EXTENSIONS):
        repo_path = migration_helpers.find_migrate_repo(extension)
        migration.db_version_control(sql.get_engine(), repo_path)
        current = migration_helpers.get_db_version(extension=name)
        self.assertEqual(0, current,
                         "Migrate version for %s is not 0" % name)
def load_backends(self): """Initializes each manager and assigns them to an attribute.""" # TODO(blk-u): Shouldn't need to clear the registry here, but some # tests call load_backends multiple times. These should be fixed to # only call load_backends once. dependency.reset() # TODO(morganfainberg): Shouldn't need to clear the registry here, but # some tests call load_backends multiple times. Since it is not # possible to re-configure a backend, we need to clear the list. This # should eventually be removed once testing has been cleaned up. kvs_core.KEY_VALUE_STORE_REGISTRY.clear() self.clear_auth_plugin_registry() drivers = service.load_backends() drivers.update(dependency.resolve_future_dependencies()) for manager_name, manager in six.iteritems(drivers): setattr(self, manager_name, manager) self.addCleanup(self.cleanup_instance(*drivers.keys())) # The credential backend only supports SQL, so we always have to load # the tables. self.engine = sql.get_engine() self.addCleanup(sql.cleanup) self.addCleanup(self.cleanup_instance('engine')) sql.ModelBase.metadata.create_all(bind=self.engine) self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
def _sync_common_repo(version):
    """Upgrade the common repo to ``version``; downgrades are rejected.

    The mysql utf8 sanity check is skipped here (``sanity_check=False``).
    """
    repo_path = find_migrate_repo()
    base_version = migrate_repo.DB_INIT_VERSION
    engine = sql.get_engine()
    _assert_not_schema_downgrade(version=version)
    migration.db_sync(engine, repo_path, version=version,
                      init_version=base_version, sanity_check=False)
def setUp(self):
    super(SqlMigrateBase, self).setUp()
    database.initialize_sql_session()
    conn_str = CONF.database.connection
    uses_default_sqlite_file = (
        conn_str != tests.IN_MEM_DB_CONN_STRING and
        conn_str.startswith('sqlite') and
        conn_str[10:] == tests.DEFAULT_TEST_DB_FILE)
    if uses_default_sqlite_file:
        # Override the default with a DB that is specific to the migration
        # tests only if the DB Connection string is the same as the global
        # default. This is required so that no conflicts occur due to the
        # global default DB already being under migrate control. This is
        # only needed if the DB is not in-memory.
        db_file = tests.dirs.tmp('keystone_migrate_test.db')
        self.config_fixture.config(group='database',
                                   connection='sqlite:///%s' % db_file)

    # create and share a single sqlalchemy engine for testing
    self.engine = sql.get_engine()
    self.Session = db_session.get_maker(self.engine, autocommit=False)

    self.initialize_sql()
    self.repo_path = migration_helpers.find_migrate_repo(
        self.repo_package())
    self.schema = versioning_api.ControlledSchema.create(
        self.engine, self.repo_path, self.initial_db_version)

    # auto-detect the highest available schema version in the migrate_repo
    self.max_version = self.schema.repository.version().version
def setUp(self):
    """Create all model tables on a shared engine; tear down afterwards."""
    super(Database, self).setUp()
    self.engine = sql.get_engine()
    self.addCleanup(sql.cleanup)
    metadata = sql.ModelBase.metadata
    metadata.create_all(bind=self.engine)
    self.addCleanup(metadata.drop_all, bind=self.engine)
def setUp(self):
    super(SqlMigrateBase, self).setUp()
    database.initialize_sql_session()
    connection = CONF.database.connection
    if (connection != tests.IN_MEM_DB_CONN_STRING and
            connection.startswith('sqlite') and
            connection[10:] == tests.DEFAULT_TEST_DB_FILE):
        # Override the default with a DB that is specific to the migration
        # tests only if the DB Connection string is the same as the global
        # default. This is required so that no conflicts occur due to the
        # global default DB already being under migrate control. This is
        # only needed if the DB is not in-memory.
        migrate_db = tests.dirs.tmp('keystone_migrate_test.db')
        self.config_fixture.config(
            group='database', connection='sqlite:///%s' % migrate_db)

    # create and share a single sqlalchemy engine for testing
    self.engine = sql.get_engine()
    self.Session = db_session.get_maker(self.engine, autocommit=False)

    self.initialize_sql()
    self.repo_path = migration_helpers.find_migrate_repo(
        self.repo_package())
    self.schema = versioning_api.ControlledSchema.create(
        self.engine, self.repo_path, self.initial_db_version)

    # auto-detect the highest available schema version in the migrate_repo
    self.max_version = self.schema.repository.version().version
def test_start_version_db_init_version(self):
    """A freshly-controlled repo reports DB_INIT_VERSION."""
    expected = migrate_repo.DB_INIT_VERSION
    actual = migration.db_version(sql.get_engine(), self.repo_path, expected)
    self.assertEqual(expected, actual,
                     'DB is not at version %s' % expected)
def test_extension_initial(self):
    """When get the initial version of an extension, it's 0."""
    for name, extension in six.iteritems(EXTENSIONS):
        repo_path = migration_helpers.find_migrate_repo(extension)
        migration.db_version_control(sql.get_engine(), repo_path)
        current = migration_helpers.get_db_version(extension=name)
        failure_msg = 'Migrate version for %s is not 0' % name
        self.assertEqual(0, current, failure_msg)
def _sync_common_repo(version):
    """Sync the common migration repo up to ``version`` (latest if None)."""
    migration.db_sync(sql.get_engine(),
                      find_migrate_repo(),
                      version=version,
                      init_version=migrate_repo.DB_INIT_VERSION)
def sync_database_to_version(extension=None, version=None):
    """Upgrade the common repo, or a named extension repo, to ``version``.

    With no extension, the common keystone repo is synced from
    DB_INIT_VERSION. For an extension, its package is imported, placed
    under migrate version control (ignoring "already controlled"), and
    synced from version 0. A mysql deployment wedged at migration 37
    (bug #1334779) is repaired and retried.

    :raises ImportError: if the named extension package does not exist
    """
    if not extension:
        abs_path = find_migrate_repo()
        init_version = migrate_repo.DB_INIT_VERSION
    else:
        init_version = 0
        try:
            package_name = '.'.join((contrib.__name__, extension))
            package = importutils.import_module(package_name)
        except ImportError:
            raise ImportError(_("%s extension does not exist.")
                              % package_name)
        try:
            abs_path = find_migrate_repo(package)
            try:
                migration.db_version_control(sql.get_engine(), abs_path)
            # Register the repo with the version control API.
            # If it already knows about the repo, it will throw
            # an exception that we can safely ignore.
            except exceptions.DatabaseAlreadyControlledError:
                pass
        except exception.MigrationNotProvided as e:
            print(e)
            sys.exit(1)
    engine = sql.get_engine()
    try:
        migration.db_sync(engine, abs_path, version=version,
                          init_version=init_version)
    except ValueError:
        # NOTE(morganfainberg): ValueError is raised from the sanity check
        # (verifies that tables are utf8 under mysql). The region table was
        # not initially built with InnoDB and utf8 as part of the table
        # arguments when the migration was initially created. Bug #1334779
        # is a scenario where the deployer can get wedged, unable to
        # upgrade or downgrade. This is a workaround to "fix" that table
        # if we're under MySQL.
        if (not extension and engine.name == 'mysql' and
                six.text_type(get_db_version()) == '37'):
            _fix_migration_37(engine)
            # Try the migration a second time now that we've done the
            # un-wedge work.
            migration.db_sync(engine, abs_path, version=version,
                              init_version=init_version)
        else:
            raise
def setup_database():
    """Build a pristine test DB once; restore it from the copy afterwards."""
    db = dirs.tmp('test.db')
    pristine = dirs.tmp('test.db.pristine')
    if os.path.exists(db):
        os.unlink(db)
    if os.path.exists(pristine):
        # Fast path: restore the previously migrated database.
        shutil.copyfile(pristine, db)
    else:
        # First run: migrate from scratch, then keep a pristine copy.
        migration.db_sync(sql.get_engine(),
                          migration_helpers.find_migrate_repo())
        migration_helpers.sync_database_to_version(extension='revoke')
        shutil.copyfile(db, pristine)
def config(self, config_files):
    super(CompatTestCase, self).config(config_files)
    # FIXME(morganfainberg): Since we are running tests through the
    # controllers and some internal api drivers are SQL-only, the correct
    # approach is to ensure we have the correct backing store. The
    # credential api makes some very SQL specific assumptions that should
    # be addressed allowing for non-SQL based testing to occur.
    self.load_backends()
    self.engine = sql.get_engine()
    self.addCleanup(sql.cleanup)
    # Register drop_all before creating, so tables are removed even if a
    # later setup step fails.
    self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
    sql.ModelBase.metadata.create_all(bind=self.engine)
def setUp(self):
    """Wire up a hybrid LDAP-identity / SQL-everything-else environment."""
    self.useFixture(database.Database())
    super(BaseBackendLdapIdentitySqlEverythingElse, self).setUp()
    self.clear_database()
    self.load_backends()
    cache.configure_cache_region(cache.REGION)
    self.engine = sql.get_engine()
    self.addCleanup(sql.cleanup)
    metadata = sql.ModelBase.metadata
    metadata.create_all(bind=self.engine)
    self.addCleanup(metadata.drop_all, bind=self.engine)
    self.load_fixtures(default_fixtures)
    # defaulted by the data load
    self.user_foo['enabled'] = True
def _setup_database(extensions=None):
    """Build or restore the on-disk test DB; no-op for in-memory sqlite."""
    if CONF.database.connection == tests.IN_MEM_DB_CONN_STRING:
        return
    db = tests.dirs.tmp('test.db')
    pristine = tests.dirs.tmp('test.db.pristine')
    if os.path.exists(db):
        os.unlink(db)
    if os.path.exists(pristine):
        # Fast path: reuse the previously migrated copy.
        shutil.copyfile(pristine, db)
    else:
        # First run: migrate from scratch, then stash a pristine copy.
        migration.db_sync(sql.get_engine(),
                          migration_helpers.find_migrate_repo())
        for extension in extensions or []:
            migration_helpers.sync_database_to_version(extension=extension)
        shutil.copyfile(db, pristine)
def db_sync(self):
    """Migrate the oauth1 extension repo to its latest version."""
    repo_path = migration_helpers.find_migrate_repo(oauth1)
    migration.db_sync(sql.get_engine(), repo_path)
def db_sync(self):
    """Migrate the federation extension repo to its latest version."""
    migration.db_sync(sql.get_engine(),
                      migration_helpers.find_migrate_repo(federation))
def db_sync(self, version=None):
    """Migrate the common repo up to ``version`` (latest when None)."""
    repo_path = migration_helpers.find_migrate_repo()
    migration.db_sync(sql.get_engine(), repo_path, version=version)
def db_sync(self, version=None):
    """Migrate the endpoint_filter extension repo up to ``version``."""
    migration.db_sync(sql.get_engine(),
                      migration_helpers.find_migrate_repo(endpoint_filter),
                      version=version)
def db_sync(self, version=None):
    """Migrate the common repo up to ``version`` (latest when None)."""
    engine = sql.get_engine()
    repo_path = migration_helpers.find_migrate_repo()
    migration.db_sync(engine, repo_path, version=version)