def _walk_versions(self, conf, initial_version=0):
    """Walk the full migration path up and then back down again.

    Starting from ``initial_version``, sync the schema one version at
    a time up to the repository's latest version, then downgrade step
    by step back to 0, asserting the reported version after every
    move. The databases hold no data, so this only proves the schema
    scripts themselves run cleanly.
    """
    # The database must not be under version control yet.
    self.assertRaises(exception.DatabaseMigrationError,
                      migration_api.db_version,
                      conf)

    # Place it under version control and confirm the starting point.
    migration_api.version_control(conf, version=initial_version)
    self.assertEqual(initial_version, migration_api.db_version(conf))

    latest = TestMigrations.REPOSITORY.latest

    # Walk upward one schema version at a time.
    for target in xrange(initial_version + 1, latest + 1):
        migration_api.db_sync(conf, target)
        self.assertEqual(migration_api.db_version(conf), target)

    # Now walk it back down to 0, exercising every downgrade script.
    for target in reversed(xrange(0, latest)):
        migration_api.downgrade(conf, target)
        self.assertEqual(migration_api.db_version(conf), target)
def _walk_versions(self, conf):
    """Upgrade the schema to the latest version, then back down to 0.

    Runs against an empty database, checking only that every
    upgrade/downgrade script executes and that the reported version
    matches after each step.
    """
    # Not under version control yet, so db_version must raise.
    self.assertRaises(exception.DatabaseMigrationError,
                      migration_api.db_version,
                      conf)

    # Version-control the database; it starts at version 0.
    migration_api.version_control(conf)
    self.assertEqual(0, migration_api.db_version(conf))

    latest = TestMigrations.REPOSITORY.latest

    # Step up through every schema version.
    for target in xrange(1, latest + 1):
        migration_api.upgrade(conf, target)
        self.assertEqual(migration_api.db_version(conf), target)

    # Now walk it back down to 0, testing the downgrade paths.
    for target in reversed(xrange(0, latest)):
        migration_api.downgrade(conf, target)
        self.assertEqual(migration_api.db_version(conf), target)
def _check_no_data_loss_14_to_15(self, engine, conf):
    """
    Check that migrating swift location credentials to quoted form
    and back does not result in data loss.
    """
    migration_api.version_control(conf, version=0)
    migration_api.upgrade(conf, 14)

    conn = engine.connect()
    images_table = Table('images', MetaData(), autoload=True,
                         autoload_with=engine)

    def get_locations():
        # Read back every images.location value in insertion order.
        conn = engine.connect()
        locations = [x[0] for x in
                     conn.execute(select(['location'],
                                         from_obj=[images_table]))]
        conn.close()
        return locations

    # The swift URI embeds 'acct:usr' credentials whose inner ':' must
    # be percent-quoted (%3A) by migration 15; the file URI contains
    # nothing to quote and must pass through unchanged.
    # NOTE: the unquoted value must be the exact decoding of the quoted
    # one below, or the round-trip assertions cannot hold.
    unquoted_locations = [
        'swift://acct:usr:[email protected]/container/obj-id',
        'file://foo',
    ]
    quoted_locations = [
        'swift://acct%3Ausr:[email protected]/container/obj-id',
        'file://foo',
    ]

    # Insert images with an unquoted image location
    now = datetime.datetime.now()
    kwargs = dict(deleted=False,
                  created_at=now,
                  updated_at=now,
                  status='active',
                  is_public=True,
                  min_disk=0,
                  min_ram=0)
    for i, location in enumerate(unquoted_locations):
        kwargs.update(location=location, id=i)
        conn.execute(images_table.insert(), [kwargs])
    conn.close()

    # Upgrading must quote the embedded credentials...
    migration_api.upgrade(conf, 15)
    self.assertEqual(get_locations(), quoted_locations)

    # ...and downgrading must restore the original unquoted form.
    migration_api.downgrade(conf, 14)
    self.assertEqual(get_locations(), unquoted_locations)
def configure_db(conf): """ Establish the database, create an engine if needed, and register the models. :param conf: Mapping of configuration options """ global _ENGINE, sa_logger, logger, _MAX_RETRIES, _RETRY_INTERVAL if not _ENGINE: for opt in db_opts: # avoid duplicate registration if not opt.name in conf: conf.register_opt(opt) sql_connection = conf.sql_connection _MAX_RETRIES = conf.sql_max_retries _RETRY_INTERVAL = conf.sql_retry_interval connection_dict = sqlalchemy.engine.url.make_url(sql_connection) engine_args = {'pool_recycle': conf.sql_idle_timeout, 'echo': False, 'convert_unicode': True } if 'mysql' in connection_dict.drivername: engine_args['listeners'] = [MySQLPingListener()] try: _ENGINE = create_engine(sql_connection, **engine_args) _ENGINE.connect = wrap_db_error(_ENGINE.connect) _ENGINE.connect() except Exception, err: msg = _("Error configuring registry database with supplied " "sql_connection '%(sql_connection)s'. " "Got error:\n%(err)s") % locals() logger.error(msg) raise sa_logger = logging.getLogger('sqlalchemy.engine') if conf.debug: sa_logger.setLevel(logging.DEBUG) elif conf.verbose: sa_logger.setLevel(logging.INFO) if conf.db_auto_create: logger.info('auto-creating glance registry DB') models.register_models(_ENGINE) try: migration.version_control(conf) except exception.DatabaseMigrationError: # only arises when the DB exists and is under version control pass else: logger.info('not auto-creating glance registry DB')
def configure_db(conf): """ Establish the database, create an engine if needed, and register the models. :param conf: Mapping of configuration options """ global _ENGINE, sa_logger, logger, _MAX_RETRIES, _RETRY_INTERVAL if not _ENGINE: conf.register_opts(db_opts) sql_connection = conf.sql_connection _MAX_RETRIES = conf.sql_max_retries _RETRY_INTERVAL = conf.sql_retry_interval connection_dict = sqlalchemy.engine.url.make_url(sql_connection) engine_args = {"pool_recycle": conf.sql_idle_timeout, "echo": False, "convert_unicode": True} if "mysql" in connection_dict.drivername: engine_args["listeners"] = [MySQLPingListener()] try: _ENGINE = create_engine(sql_connection, **engine_args) _ENGINE.connect = wrap_db_error(_ENGINE.connect) _ENGINE.connect() except Exception, err: msg = ( _( "Error configuring registry database with supplied " "sql_connection '%(sql_connection)s'. " "Got error:\n%(err)s" ) % locals() ) logger.error(msg) raise sa_logger = logging.getLogger("sqlalchemy.engine") if conf.debug: sa_logger.setLevel(logging.DEBUG) elif conf.verbose: sa_logger.setLevel(logging.INFO) models.register_models(_ENGINE) try: migration.version_control(conf) except exception.DatabaseMigrationError: # only arises when the DB exists and is under version control pass
def _no_data_loss_2_to_3_to_2(self, engine, conf):
    """Upgrade 2 -> 3 and back, verifying image 'type' data survives.

    Migration 3 moves the images.type column into image_properties
    rows; downgrading must move those properties back without losing
    any records.
    """
    migration_api.version_control(conf, version=0)
    migration_api.upgrade(conf, 2)

    cur_version = migration_api.db_version(conf)
    self.assertEqual(2, cur_version)

    # We are now on version 2. Check that the images table still
    # contains the type column directly.
    images_table = Table('images', MetaData(), autoload=True,
                         autoload_with=engine)
    image_properties_table = Table('image_properties', MetaData(),
                                   autoload=True,
                                   autoload_with=engine)

    self.assertTrue('type' in images_table.c,
                    "'type' column not found in images table columns! "
                    "images table columns: %s"
                    % images_table.c.keys())

    # Record row counts so we can assert deltas after migrating.
    conn = engine.connect()
    sel = select([func.count("*")], from_obj=[images_table])
    orig_num_images = conn.execute(sel).scalar()
    sel = select([func.count("*")], from_obj=[image_properties_table])
    orig_num_image_properties = conn.execute(sel).scalar()

    now = datetime.datetime.now()
    inserter = images_table.insert()
    conn.execute(inserter, [
        {'deleted': False, 'created_at': now, 'updated_at': now,
         'type': 'kernel', 'status': 'active', 'is_public': True},
        {'deleted': False, 'created_at': now, 'updated_at': now,
         'type': 'ramdisk', 'status': 'active', 'is_public': True}])

    sel = select([func.count("*")], from_obj=[images_table])
    num_images = conn.execute(sel).scalar()
    self.assertEqual(orig_num_images + 2, num_images)
    conn.close()

    # Now let's upgrade to 3. This should move the type column
    # to the image_properties table as type properties.
    migration_api.upgrade(conf, 3)

    cur_version = migration_api.db_version(conf)
    self.assertEqual(3, cur_version)

    images_table = Table('images', MetaData(), autoload=True,
                         autoload_with=engine)
    self.assertTrue('type' not in images_table.c,
                    "'type' column still present in images table "
                    "columns! images table columns reported by "
                    "metadata: %s\n" % images_table.c.keys())

    image_properties_table = Table('image_properties', MetaData(),
                                   autoload=True,
                                   autoload_with=engine)

    conn = engine.connect()
    sel = select([func.count("*")], from_obj=[image_properties_table])
    num_image_properties = conn.execute(sel).scalar()
    self.assertEqual(orig_num_image_properties + 2,
                     num_image_properties)
    conn.close()

    # Downgrade to 2 and check that the type properties were moved
    # to the main image table
    migration_api.downgrade(conf, 2)

    images_table = Table('images', MetaData(), autoload=True,
                         autoload_with=engine)
    self.assertTrue('type' in images_table.c,
                    "'type' column not found in images table columns! "
                    "images table columns: %s"
                    % images_table.c.keys())

    image_properties_table = Table('image_properties', MetaData(),
                                   autoload=True,
                                   autoload_with=engine)

    conn = engine.connect()
    sel = select([func.count("*")], from_obj=[image_properties_table])
    last_num_image_properties = conn.execute(sel).scalar()

    self.assertEqual(num_image_properties - 2,
                     last_num_image_properties)
def _no_data_loss_2_to_3_to_2(self, engine, conf):
    """Round-trip schema versions 2 -> 3 -> 2 without losing data.

    At version 2 the images table carries a 'type' column; migration 3
    relocates it into image_properties rows. The downgrade must move
    those properties back so the row-count deltas balance out.
    """
    migration_api.version_control(conf)
    migration_api.upgrade(conf, 2)

    cur_version = migration_api.db_version(conf)
    self.assertEqual(2, cur_version)

    # We are now on version 2. Check that the images table still
    # contains the type column directly.
    images_table = Table('images', MetaData(), autoload=True,
                         autoload_with=engine)
    image_properties_table = Table('image_properties', MetaData(),
                                   autoload=True,
                                   autoload_with=engine)

    self.assertTrue(
        'type' in images_table.c,
        "'type' column not found in images table columns! "
        "images table columns: %s" % images_table.c.keys())

    # Snapshot row counts before inserting test fixtures.
    conn = engine.connect()
    sel = select([func.count("*")], from_obj=[images_table])
    orig_num_images = conn.execute(sel).scalar()
    sel = select([func.count("*")], from_obj=[image_properties_table])
    orig_num_image_properties = conn.execute(sel).scalar()

    now = datetime.datetime.now()
    inserter = images_table.insert()
    conn.execute(inserter, [{
        'deleted': False,
        'created_at': now,
        'updated_at': now,
        'type': 'kernel',
        'status': 'active',
        'is_public': True
    }, {
        'deleted': False,
        'created_at': now,
        'updated_at': now,
        'type': 'ramdisk',
        'status': 'active',
        'is_public': True
    }])

    sel = select([func.count("*")], from_obj=[images_table])
    num_images = conn.execute(sel).scalar()
    self.assertEqual(orig_num_images + 2, num_images)
    conn.close()

    # Now let's upgrade to 3. This should move the type column
    # to the image_properties table as type properties.
    migration_api.upgrade(conf, 3)

    cur_version = migration_api.db_version(conf)
    self.assertEqual(3, cur_version)

    images_table = Table('images', MetaData(), autoload=True,
                         autoload_with=engine)
    self.assertTrue(
        'type' not in images_table.c,
        "'type' column still present in images table columns! "
        "images table columns reported by metadata: %s\n"
        % images_table.c.keys())

    image_properties_table = Table('image_properties', MetaData(),
                                   autoload=True,
                                   autoload_with=engine)

    conn = engine.connect()
    sel = select([func.count("*")], from_obj=[image_properties_table])
    num_image_properties = conn.execute(sel).scalar()
    self.assertEqual(orig_num_image_properties + 2,
                     num_image_properties)
    conn.close()

    # Downgrade to 2 and check that the type properties were moved
    # to the main image table
    migration_api.downgrade(conf, 2)

    images_table = Table('images', MetaData(), autoload=True,
                         autoload_with=engine)
    self.assertTrue(
        'type' in images_table.c,
        "'type' column not found in images table columns! "
        "images table columns: %s" % images_table.c.keys())

    image_properties_table = Table('image_properties', MetaData(),
                                   autoload=True,
                                   autoload_with=engine)

    conn = engine.connect()
    sel = select([func.count("*")], from_obj=[image_properties_table])
    last_num_image_properties = conn.execute(sel).scalar()

    self.assertEqual(num_image_properties - 2,
                     last_num_image_properties)