def setup():
    import mox  # Fail fast if you don't have mox. Workaround for bug 810424

    from cinder import rpc  # Register rpc_backend before fake_flags sets it
    FLAGS.register_opts(rpc.rpc_opts)
    from cinder.db import migration
    from cinder.tests import fake_flags

    if FLAGS.sql_connection == "sqlite://":
        if migration.db_version() > 1:
            return
    else:
        testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db)
        if os.path.exists(testdb):
            return
    migration.db_sync()

    if FLAGS.sql_connection == "sqlite://":
        global _DB
        engine = get_engine()
        conn = engine.connect()
        _DB = "".join(line for line in conn.connection.iterdump())
    else:
        cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db)
        shutil.copyfile(testdb, cleandb)
def __init__(self, connection, sqlite_synchronous=True, soft_deletes=False):
    self.soft_deletes = soft_deletes
    cfg.CONF.set_override('connection', connection, 'database')
    cfg.CONF.set_override('sqlite_synchronous', sqlite_synchronous,
                          'database')

    # Suppress logging for migration
    migrate_logger = logging.getLogger('migrate')
    migrate_logger.setLevel(logging.WARNING)

    self._clear_facade()
    self.db_instance = db_api.oslo_db_api.DBAPI.from_config(
        conf=cfg.CONF, backend_mapping=db_api._BACKEND_MAPPING, lazy=True)

    # We need to wrap some get methods that get called before the volume is
    # actually created.
    self.original_vol_type_get = self.db_instance.volume_type_get
    self.db_instance.volume_type_get = self.vol_type_get
    self.original_qos_specs_get = self.db_instance.qos_specs_get
    self.db_instance.qos_specs_get = self.qos_specs_get
    self.original_get_by_id = self.db_instance.get_by_id
    self.db_instance.get_by_id = self.get_by_id

    migration.db_sync()
    self._create_key_value_table()
    super(DBPersistence, self).__init__()
def sync(self, version=None, bump_versions=False):
    """Sync the database up to the most recent version."""
    if version is not None and version > db.MAX_INT:
        print(_('Version should be less than or equal to '
                '%(max_version)d.') % {'max_version': db.MAX_INT})
        sys.exit(1)

    try:
        db_migration.db_sync(version)
    except db_exc.DBMigrationError as ex:
        print("Error during database migration: %s" % ex)
        sys.exit(1)

    try:
        if bump_versions:
            ctxt = context.get_admin_context()
            services = objects.ServiceList.get_all(ctxt)
            for service in services:
                rpc_version = RPC_VERSIONS[service.binary]
                if (service.rpc_current_version != rpc_version or
                        service.object_current_version != OVO_VERSION):
                    service.rpc_current_version = rpc_version
                    service.object_current_version = OVO_VERSION
                    service.save()
    except Exception as ex:
        print(_('Error during service version bump: %s') % ex)
        sys.exit(2)
def _cache_schema(self):
    global DB_SCHEMA
    if not DB_SCHEMA:
        engine = sqla_api.get_engine()
        conn = engine.connect()
        migration.db_sync()
        DB_SCHEMA = "".join(line for line in conn.connection.iterdump())
        engine.dispose()
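A minimal sketch of how a cached dump like DB_SCHEMA is typically consumed, assuming the same sqla_api engine helpers used above; the method name _reset_database is an illustration, not taken from the source. Before each test the dump is replayed into a fresh SQLite connection instead of re-running every migration:

def _reset_database(self):
    # Assumption: DB_SCHEMA holds the iterdump() output cached by
    # _cache_schema() above. executescript() on the raw sqlite3 connection
    # rebuilds the whole schema in one shot, which is much faster than a
    # full db_sync() per test.
    engine = sqla_api.get_engine()
    conn = engine.connect()
    conn.connection.executescript(DB_SCHEMA)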
def __init__(self, connection, sqlite_synchronous=True, soft_deletes=False):
    self.soft_deletes = soft_deletes
    cfg.CONF.set_override('connection', connection, 'database')
    cfg.CONF.set_override('sqlite_synchronous', sqlite_synchronous,
                          'database')

    # Suppress logging for migration
    migrate_logger = logging.getLogger('migrate')
    migrate_logger.setLevel(logging.WARNING)

    self._clear_facade()
    self.db_instance = db_api.oslo_db_api.DBAPI.from_config(
        conf=cfg.CONF, backend_mapping=db_api._BACKEND_MAPPING, lazy=True)

    # We need to wrap some get methods that get called before the volume is
    # actually created.
    self.original_vol_type_get = self.db_instance.volume_type_get
    self.db_instance.volume_type_get = self.vol_type_get
    self.original_qos_specs_get = self.db_instance.qos_specs_get
    self.db_instance.qos_specs_get = self.qos_specs_get
    self.original_get_by_id = self.db_instance.get_by_id
    self.db_instance.get_by_id = self.get_by_id

    try:
        migration.db_sync()
    except exception.DBMigrationError as exc:
        # We can be running 2 Cinder versions at the same time on the same
        # DB while we upgrade, so we must ignore the fact that the DB is
        # now on a newer version.
        if not isinstance(getattr(exc, 'inner_exception', None),
                          migrate.exceptions.VersionNotFoundError):
            raise

    self._create_key_value_table()

    # NOTE: At this point, the persistence isn't ready so we need to use
    # db_instance instead of sqlalchemy API or DB API.
    orm_obj = self.db_instance.volume_type_get_by_name(objects.CONTEXT,
                                                       '__DEFAULT__')
    cls = cinder_objs.VolumeType
    expected_attrs = cls._get_expected_attrs(objects.CONTEXT)
    self.DEFAULT_TYPE = cls._from_db_object(objects.CONTEXT,
                                            cls(objects.CONTEXT),
                                            orm_obj,
                                            expected_attrs=expected_attrs)

    super(DBPersistence, self).__init__()
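A usage sketch, not taken from the source, of instantiating this persistence layer against a local SQLite file; the cinderlib.persistence.dbms module path is an assumption, and a real deployment would point connection at its Cinder database instead:

from cinderlib.persistence import dbms

# Assumption: DBPersistence is importable from cinderlib.persistence.dbms.
# soft_deletes=False presumably removes rows outright instead of flagging
# them as deleted.
persistence = dbms.DBPersistence(connection='sqlite:///cinderlib.sqlite',
                                 soft_deletes=False)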
def sync(self, version=None, bump_versions=False):
    """Sync the database up to the most recent version."""
    if version is not None and version > db.MAX_INT:
        print(_('Version should be less than or equal to '
                '%(max_version)d.') % {'max_version': db.MAX_INT})
        sys.exit(1)

    try:
        result = db_migration.db_sync(version)
    except db_exc.DBMigrationError as ex:
        print("Error during database migration: %s" % ex)
        sys.exit(1)

    try:
        if bump_versions:
            ctxt = context.get_admin_context()
            services = objects.ServiceList.get_all(ctxt)
            for service in services:
                rpc_version = RPC_VERSIONS[service.binary]
                if (service.rpc_current_version != rpc_version or
                        service.object_current_version != OVO_VERSION):
                    service.rpc_current_version = rpc_version
                    service.object_current_version = OVO_VERSION
                    service.save()
    except Exception as ex:
        print(_('Error during service version bump: %s') % ex)
        sys.exit(2)

    return result
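A usage sketch under the assumption that this method lives on the cinder-manage DbCommands class (the class name is not shown in the excerpt); it is normally reached from the command line, but calling it directly looks roughly like this:

# Assumption: DbCommands is the cinder-manage command class owning sync().
commands = DbCommands()
# Upgrade to the latest schema and record the current RPC/OVO versions for
# every registered service; exit status 1 signals a migration failure and
# exit status 2 a failure during the service version bump.
commands.sync(version=None, bump_versions=True)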
def _test_db_sync(
    self, has_migrate, has_alembic, mock_get_engine, mock_find_repo,
    mock_find_conf, mock_is_migrate, mock_is_alembic, mock_init,
    mock_upgrade,
):
    mock_is_migrate.return_value = has_migrate
    mock_is_alembic.return_value = has_alembic

    migration.db_sync()

    mock_get_engine.assert_called_once_with()
    mock_find_repo.assert_called_once_with()
    mock_find_conf.assert_called_once_with()
    mock_find_conf.return_value.set_main_option.assert_called_once_with(
        'sqlalchemy.url', str(mock_get_engine.return_value.url),
    )
    mock_is_migrate.assert_called_once_with(mock_get_engine.return_value,
                                            mock_find_repo.return_value)

    if has_migrate:
        mock_is_alembic.assert_called_once_with(
            mock_get_engine.return_value)
    else:
        mock_is_alembic.assert_not_called()

    # we should only attempt the upgrade of the remaining
    # sqlalchemy-migrate-based migrations and fake apply of the initial
    # alembic migrations if sqlalchemy-migrate is in place but alembic
    # hasn't been used yet
    if has_migrate and not has_alembic:
        mock_init.assert_called_once_with(mock_get_engine.return_value,
                                          mock_find_repo.return_value,
                                          mock_find_conf.return_value)
    else:
        mock_init.assert_not_called()

    # however, we should always attempt to upgrade the requested migration
    # to alembic
    mock_upgrade.assert_called_once_with(mock_get_engine.return_value,
                                         mock_find_conf.return_value,
                                         None)
def __init__(self, connection, sqlite_synchronous=True, soft_deletes=False):
    self.soft_deletes = soft_deletes
    volume_cmd.CONF.database.connection = connection
    volume_cmd.CONF.database.sqlite_synchronous = sqlite_synchronous

    # Suppress logging for migration
    migrate_logger = logging.getLogger('migrate')
    migrate_logger.setLevel(logging.WARNING)

    self._clear_facade()
    self.db_instance = db_api.oslo_db_api.DBAPI.from_config(
        conf=volume_cmd.CONF, backend_mapping=db_api._BACKEND_MAPPING,
        lazy=True)

    migration.db_sync()
    self._create_key_value_table()
    super(DBPersistence, self).__init__()
def sync(self, version=None): """Sync the database up to the most recent version.""" if version is not None and version > db.MAX_INT: print(_('Version should be less than or equal to ' '%(max_version)d.') % {'max_version': db.MAX_INT}) sys.exit(1) try: return db_migration.db_sync(version) except db_exc.DbMigrationError as ex: print("Error during database migration: %s" % ex) sys.exit(1)
def sync(self, version=None): """Sync the database up to the most recent version.""" if version is not None and version > db.MAX_INT: print(_('Version should be less than or equal to ' '%(max_version)d.') % {'max_version': db.MAX_INT}) sys.exit(1) try: return db_migration.db_sync(version) except db_exc.DBMigrationError as ex: print("Error during database migration: %s" % ex) sys.exit(1)
def test_mysql_innodb(self):
    """Test that table creation on mysql only builds InnoDB tables."""
    # add this to the global lists to make reset work with it, it's removed
    # automatically in tearDown so no need to clean it up here.

    # sanity check
    migration.db_sync(engine=self.migrate_engine)

    total = self.migrate_engine.execute(
        "SELECT count(*) "
        "from information_schema.TABLES "
        "where TABLE_SCHEMA='{0}'".format(
            self.migrate_engine.url.database))
    self.assertGreater(total.scalar(), 0,
                       msg="No tables found. Wrong schema?")

    noninnodb = self.migrate_engine.execute(
        "SELECT count(*) "
        "from information_schema.TABLES "
        "where TABLE_SCHEMA='openstack_citest' "
        "and ENGINE!='InnoDB' "
        "and TABLE_NAME!='migrate_version'")
    count = noninnodb.scalar()
    self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
def setup():
    import mox  # Fail fast if you don't have mox. Workaround for bug 810424

    from cinder.db import migration
    from cinder.tests import fake_flags
    fake_flags.set_defaults(FLAGS)

    if FLAGS.sql_connection == "sqlite://":
        if migration.db_version() > 1:
            return
    else:
        testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db)
        if os.path.exists(testdb):
            return
    migration.db_sync()

    if FLAGS.sql_connection == "sqlite://":
        global _DB
        engine = get_engine()
        conn = engine.connect()
        _DB = "".join(line for line in conn.connection.iterdump())
    else:
        cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db)
        shutil.copyfile(testdb, cleandb)
def sync(self, version=None): """Sync the database up to the most recent version.""" return db_migration.db_sync(version)
def test_db_version_alembic(self):
    migration.db_sync()

    head = alembic_script.ScriptDirectory.from_config(
        self.config,
    ).get_current_head()
    self.assertEqual(head, migration.db_version())
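A sketch of reading the same head revision outside the test, with the alembic.ini location given here as an assumption; once db_sync() has run, migration.db_version() is expected to report this value:

from alembic import script as alembic_script
from alembic.config import Config

# Assumption: the alembic configuration shipped with Cinder lives at
# cinder/db/alembic.ini; adjust the path for the actual checkout.
config = Config('cinder/db/alembic.ini')
head = alembic_script.ScriptDirectory.from_config(config).get_current_head()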