def _walk_versions(self, initial_version=0):
    """Walk the migration repository up to the latest version and back
    down to 0, asserting the reported schema version at every step.

    The databases hold no data, so this only verifies that the schema
    itself upgrades and downgrades successfully.
    """
    # The database must not be under version control yet.
    self.assertRaises(exception.DatabaseMigrationError,
                      migration_api.db_version)

    # Place it under version control at the starting version.
    migration_api.version_control(version=initial_version)
    self.assertEqual(initial_version, migration_api.db_version())

    latest = TestMigrations.REPOSITORY.latest

    # Upgrade one version at a time, checking each step landed.
    for target in xrange(initial_version + 1, latest + 1):
        migration_api.db_sync(target)
        self.assertEqual(migration_api.db_version(), target)

    # Now walk back down to 0, exercising every downgrade path.
    for target in reversed(xrange(0, latest)):
        migration_api.downgrade(target)
        self.assertEqual(migration_api.db_version(), target)
def test_migration_19(self):
    """Upgrade 18->19 copies each non-NULL image location into the new
    image_locations table; downgrade 19->18 copies the (possibly
    edited) values back onto images.location.
    """
    for key, engine in self.engines.items():
        self.config(sql_connection=TestMigrations.TEST_DATABASES[key])
        migration_api.version_control(version=0)
        migration_api.upgrade(18)

        images_table = Table('images', MetaData(engine), autoload=True)

        now = datetime.datetime.now()
        base_values = {
            'deleted': False,
            'created_at': now,
            'updated_at': now,
            'status': 'active',
            'is_public': True,
            'min_disk': 0,
            'min_ram': 0,
        }
        images = [
            {'id': 1, 'location': 'http://glance.example.com'},
            #NOTE(bcwaldon): images with a location of None should
            # not be migrated
            {'id': 2, 'location': None},
        ]
        # FIX: was ``map(lambda image: image.update(base_values), images)``.
        # Using map() for a side effect is unidiomatic and silently does
        # nothing on Python 3 (map is lazy there); a plain loop is explicit.
        for image in images:
            image.update(base_values)
        for image in images:
            images_table.insert().values(image).execute()

        migration_api.upgrade(19)

        image_locations_table = Table('image_locations', MetaData(engine),
                                      autoload=True)
        records = image_locations_table.select().execute().fetchall()

        # Only the image with a non-NULL location should be migrated.
        self.assertEqual(len(records), 1)
        locations = dict([(il.image_id, il.value) for il in records])
        self.assertEqual({'1': 'http://glance.example.com'}, locations)

        # Edit the migrated location so the downgrade provably copies
        # the current value back, not the original.
        image_locations_table = Table('image_locations', MetaData(engine),
                                      autoload=True)
        image_locations_table.update()\
            .where(image_locations_table.c.image_id == 1)\
            .values(value='http://swift.example.com')\
            .execute()

        migration_api.downgrade(18)

        images_table = Table('images', MetaData(engine), autoload=True)
        records = images_table.select().execute().fetchall()

        self.assertEqual(len(records), 2)
        locations = dict([(i.id, i.location) for i in records])
        self.assertEqual({'1': 'http://swift.example.com', '2': None},
                         locations)
def _check_no_data_loss_14_to_15(self, engine):
    """
    Check that migrating swift location credentials to quoted form
    and back does not result in data loss.
    """
    migration_api.version_control(version=0)
    migration_api.upgrade(14)

    images_table = Table('images', MetaData(), autoload=True,
                         autoload_with=engine)

    def fetch_locations():
        # Read back every 'location' value currently in the table.
        connection = engine.connect()
        rows = connection.execute(
            select(['location'], from_obj=[images_table]))
        values = [row[0] for row in rows]
        connection.close()
        return values

    unquoted_locations = [
        'swift://*****:*****@example.com/container/obj-id',
        'file://foo',
    ]

    quoted_locations = [
        'swift://acct%3Ausr:[email protected]/container/obj-id',
        'file://foo',
    ]

    # Insert images with an unquoted image location
    now = datetime.datetime.now()
    row = dict(
        deleted=False,
        created_at=now,
        updated_at=now,
        status='active',
        is_public=True,
        min_disk=0,
        min_ram=0,
    )
    connection = engine.connect()
    for idx, location in enumerate(unquoted_locations):
        row.update(location=location, id=idx)
        connection.execute(images_table.insert(), [row])
    connection.close()

    # Upgrading quotes the credentials; downgrading restores them.
    migration_api.upgrade(15)
    self.assertEqual(fetch_locations(), quoted_locations)
    migration_api.downgrade(14)
    self.assertEqual(fetch_locations(), unquoted_locations)
def _check_16_to_17(self, engine):
    """
    Check that migrating swift location credentials to quoted form
    and back works.
    """
    migration_api.version_control(version=0)
    migration_api.upgrade(16)

    images_table = Table('images', MetaData(), autoload=True,
                         autoload_with=engine)

    def fetch_locations():
        # Pull every stored 'location' value.
        connection = engine.connect()
        rows = connection.execute(
            select(['location'], from_obj=[images_table]))
        values = [row[0] for row in rows]
        connection.close()
        return values

    unquoted = 'swift://*****:*****@example.com/container/obj-id'
    encrypted_unquoted = crypt.urlsafe_encrypt(
        self.metadata_encryption_key, unquoted, 64)

    quoted = 'swift://acct%3Ausr:[email protected]/container/obj-id'

    # Insert image with an unquoted image location
    now = datetime.datetime.now()
    row = dict(deleted=False, created_at=now, updated_at=now,
               status='active', is_public=True, min_disk=0, min_ram=0,
               location=encrypted_unquoted, id=1)
    connection = engine.connect()
    connection.execute(images_table.insert(), [row])
    connection.close()

    def decrypted_location():
        # Locations at versions 16/17 are stored encrypted.
        return crypt.urlsafe_decrypt(self.metadata_encryption_key,
                                     fetch_locations()[0])

    migration_api.upgrade(17)
    self.assertEqual(decrypted_location(), quoted)

    migration_api.downgrade(16)
    self.assertEqual(decrypted_location(), unquoted)
def _check_no_data_loss_14_to_15(self, engine):
    """
    Check that migrating swift location credentials to quoted form
    and back does not result in data loss.
    """
    migration_api.version_control(version=0)
    migration_api.upgrade(14)

    images_table = Table('images', MetaData(), autoload=True,
                         autoload_with=engine)

    def current_locations():
        # Snapshot of all 'location' column values.
        cxn = engine.connect()
        result = cxn.execute(select(['location'],
                                    from_obj=[images_table]))
        found = [record[0] for record in result]
        cxn.close()
        return found

    unquoted_locations = [
        'swift://*****:*****@example.com/container/obj-id',
        'file://foo',
    ]
    quoted_locations = [
        'swift://acct%3Ausr:[email protected]/container/obj-id',
        'file://foo',
    ]

    # Insert images with an unquoted image location
    now = datetime.datetime.now()
    template = dict(deleted=False, created_at=now, updated_at=now,
                    status='active', is_public=True, min_disk=0,
                    min_ram=0)
    cxn = engine.connect()
    for image_id, location in enumerate(unquoted_locations):
        template.update(location=location, id=image_id)
        cxn.execute(images_table.insert(), [template])
    cxn.close()

    migration_api.upgrade(15)
    self.assertEqual(current_locations(), quoted_locations)

    migration_api.downgrade(14)
    self.assertEqual(current_locations(), unquoted_locations)
def _check_no_data_loss_16_to_17(self, engine):
    """
    Check that migrating swift location credentials to quoted form
    and back does not result in data loss.
    """
    migration_api.version_control(version=0)
    migration_api.upgrade(16)

    conn = engine.connect()
    images_table = Table('images', MetaData(), autoload=True,
                         autoload_with=engine)

    def get_locations():
        # Read back every 'location' value currently stored.
        conn = engine.connect()
        locations = [x[0] for x in conn.execute(
            select(['location'], from_obj=[images_table]))]
        conn.close()
        return locations

    locations = ['file://ab',
                 'file://abc',
                 'swift://acct3A%foobar:[email protected]/container/obj-id']

    # Insert images with an unquoted image location
    now = datetime.datetime.now()
    kwargs = dict(deleted=False, created_at=now, updated_at=now,
                  status='active', is_public=True, min_disk=0, min_ram=0)
    for i, location in enumerate(locations):
        kwargs.update(location=location, id=i)
        conn.execute(images_table.insert(), [kwargs])
    conn.close()

    def assert_locations():
        actual_locations = get_locations()
        for location in locations:
            # FIX: was 'if not location in ...'; 'not in' is the
            # idiomatic (and unambiguous) membership test.
            if location not in actual_locations:
                self.fail(_("location: %s data lost") % location)

    migration_api.upgrade(17)
    assert_locations()

    migration_api.downgrade(16)
    assert_locations()
def get_engine():
    """Return a SQLAlchemy engine, creating it on first use.

    May assign the module-global _ENGINE if not already assigned.
    (FIX: the second string literal after the original docstring was a
    no-op expression statement, not documentation — merged into one
    docstring. Removed the unused ``tries``/``retry_interval`` locals
    and trimmed ``global`` to the names this function actually rebinds.)
    """
    global _ENGINE, sa_logger

    if not _ENGINE:
        connection_dict = sqlalchemy.engine.url.make_url(_CONNECTION)

        engine_args = {
            'pool_recycle': _IDLE_TIMEOUT,
            'echo': False,
            'convert_unicode': True
        }

        try:
            _ENGINE = sqlalchemy.create_engine(_CONNECTION, **engine_args)

            if 'mysql' in connection_dict.drivername:
                # Ping MySQL connections on checkout to detect stale ones.
                sqlalchemy.event.listen(_ENGINE, 'checkout', _ping_listener)

            _ENGINE.connect = _wrap_db_error(_ENGINE.connect)
            _ENGINE.connect()
        except Exception as err:
            msg = _("Error configuring registry database with supplied "
                    "sql_connection. Got error: %s") % err
            LOG.error(msg)
            raise

        sa_logger = logging.getLogger('sqlalchemy.engine')
        if CONF.sqlalchemy_debug:
            sa_logger.setLevel(logging.DEBUG)

        if CONF.db_auto_create:
            LOG.info(_('auto-creating glance registry DB'))
            models.register_models(_ENGINE)
            try:
                migration.version_control()
            except exception.DatabaseMigrationError:
                # only arises when the DB exists and is under version control
                pass
        else:
            LOG.info(_('not auto-creating glance registry DB'))

    return _ENGINE
def get_engine():
    """Return a SQLAlchemy engine, creating it on first use.

    May assign the module-global _ENGINE if not already assigned.
    (FIX: the second string literal after the original docstring was a
    no-op expression statement, not documentation — merged into one
    docstring. Removed the unused ``tries``/``retry_interval`` locals
    and trimmed ``global`` to the names this function actually rebinds.)
    """
    global _ENGINE, sa_logger

    if not _ENGINE:
        connection_dict = sqlalchemy.engine.url.make_url(_CONNECTION)

        engine_args = {
            'pool_recycle': _IDLE_TIMEOUT,
            'echo': False,
            'convert_unicode': True}

        try:
            _ENGINE = sqlalchemy.create_engine(_CONNECTION, **engine_args)

            if 'mysql' in connection_dict.drivername:
                # Ping MySQL connections on checkout to detect stale ones.
                sqlalchemy.event.listen(_ENGINE, 'checkout', _ping_listener)

            _ENGINE.connect = _wrap_db_error(_ENGINE.connect)
            _ENGINE.connect()
        except Exception as err:
            msg = _("Error configuring registry database with supplied "
                    "sql_connection. Got error: %s") % err
            LOG.error(msg)
            raise

        sa_logger = logging.getLogger('sqlalchemy.engine')
        if CONF.debug:
            sa_logger.setLevel(logging.DEBUG)

        if CONF.db_auto_create:
            LOG.info(_('auto-creating glance registry DB'))
            models.register_models(_ENGINE)
            try:
                migration.version_control()
            except exception.DatabaseMigrationError:
                # only arises when the DB exists and is under version control
                pass
        else:
            LOG.info(_('not auto-creating glance registry DB'))

    return _ENGINE
def configure_db():
    """
    Establish the database, create an engine if needed, and
    register the models.
    """
    global _ENGINE, sa_logger, _MAX_RETRIES, _RETRY_INTERVAL
    if not _ENGINE:
        sql_connection = CONF.sql_connection
        _MAX_RETRIES = CONF.sql_max_retries
        _RETRY_INTERVAL = CONF.sql_retry_interval
        connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
        engine_args = {"pool_recycle": CONF.sql_idle_timeout,
                       "echo": False,
                       "convert_unicode": True}
        try:
            _ENGINE = sqlalchemy.create_engine(sql_connection, **engine_args)
            if "mysql" in connection_dict.drivername:
                # Ping MySQL connections on checkout to detect stale ones.
                sqlalchemy.event.listen(_ENGINE, "checkout", ping_listener)
            _ENGINE.connect = wrap_db_error(_ENGINE.connect)
            _ENGINE.connect()
        # FIX: was legacy 'except Exception, err' — 'as' form works on
        # 2.6+ and matches the style used elsewhere in this file.
        except Exception as err:
            msg = (
                _(
                    "Error configuring registry database with supplied "
                    "sql_connection '%(sql_connection)s'. "
                    "Got error:\n%(err)s"
                )
                % locals()
            )
            LOG.error(msg)
            raise

        sa_logger = logging.getLogger("sqlalchemy.engine")
        if CONF.debug:
            sa_logger.setLevel(logging.DEBUG)

        if CONF.db_auto_create:
            LOG.info("auto-creating glance registry DB")
            models.register_models(_ENGINE)
            try:
                migration.version_control()
            except exception.DatabaseMigrationError:
                # only arises when the DB exists and is under version control
                pass
        else:
            LOG.info("not auto-creating glance registry DB")
def configure_db():
    """
    Establish the database, create an engine if needed, and
    register the models.
    """
    global _ENGINE, sa_logger, _MAX_RETRIES, _RETRY_INTERVAL
    if not _ENGINE:
        sql_connection = CONF.sql_connection
        _MAX_RETRIES = CONF.sql_max_retries
        _RETRY_INTERVAL = CONF.sql_retry_interval
        connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
        engine_args = {
            'pool_recycle': CONF.sql_idle_timeout,
            'echo': False,
            'convert_unicode': True
        }
        try:
            _ENGINE = sqlalchemy.create_engine(sql_connection, **engine_args)
            if 'mysql' in connection_dict.drivername:
                # Ping MySQL connections on checkout to detect stale ones.
                sqlalchemy.event.listen(_ENGINE, 'checkout', ping_listener)
            _ENGINE.connect = wrap_db_error(_ENGINE.connect)
            _ENGINE.connect()
        # FIX: was legacy 'except Exception, err' — 'as' form works on
        # 2.6+ and matches the style used elsewhere in this file.
        except Exception as err:
            msg = _("Error configuring registry database with supplied "
                    "sql_connection '%(sql_connection)s'. "
                    "Got error:\n%(err)s") % locals()
            LOG.error(msg)
            raise

        sa_logger = logging.getLogger('sqlalchemy.engine')
        if CONF.debug:
            sa_logger.setLevel(logging.DEBUG)

        if CONF.db_auto_create:
            LOG.info('auto-creating glance registry DB')
            models.register_models(_ENGINE)
            try:
                migration.version_control()
            except exception.DatabaseMigrationError:
                # only arises when the DB exists and is under version control
                pass
        else:
            LOG.info('not auto-creating glance registry DB')
def test_migration_20(self):
    """Migration 20 drops images.location; downgrading restores it."""
    for key, engine in self.engines.items():
        self.config(sql_connection=TestMigrations.TEST_DATABASES[key])
        migration_api.version_control(version=0)
        migration_api.upgrade(19)

        def images_has_location():
            # Reflect the images table fresh each time the schema moves.
            reflected = Table('images', MetaData(engine), autoload=True)
            return 'location' in reflected.c

        self.assertTrue(images_has_location())

        migration_api.upgrade(20)
        self.assertFalse(images_has_location())

        migration_api.downgrade(19)
        self.assertTrue(images_has_location())
def version_control(self, version=None):
    """Place a database under migration control"""
    # Thin delegation to the migration module; ``version`` is passed
    # through as the initial version to record (presumably the library
    # picks a default when it is None — confirm in the migration module).
    migration.version_control(version)
def _no_data_loss_15_to_16_to_15(self, engine):
    """Upgrade 15->16 adds the image_members 'status' column and
    downgrade 16->15 removes it, without losing any image_members rows.

    FIX: the three assertion failure messages were inverted/garbled —
    each now describes the condition that actually triggers it.
    """
    migration_api.version_control(version=0)
    migration_api.upgrade(15)

    cur_version = migration_api.db_version()
    self.assertEquals(15, cur_version)

    # We are now on version 15.

    image_members_table = Table('image_members', MetaData(),
                                autoload=True, autoload_with=engine)

    # Fires when 'status' IS present, so the message says "found".
    self.assertTrue('status' not in image_members_table.c,
                    "'status' column found in image_members table "
                    "columns! image_members table columns: %s"
                    % image_members_table.c.keys())

    conn = engine.connect()
    sel = select([func.count("*")], from_obj=[image_members_table])
    orig_num_image_members = conn.execute(sel).scalar()

    now = datetime.datetime.now()
    inserter = image_members_table.insert()
    conn.execute(inserter, [
        {'deleted': False, 'created_at': now, 'member': 'fake-member',
         'updated_at': now, 'can_share': False,
         'image_id': 'fake-image-id1'}])

    sel = select([func.count("*")], from_obj=[image_members_table])
    num_image_members = conn.execute(sel).scalar()
    self.assertEqual(orig_num_image_members + 1, num_image_members)
    conn.close()

    #Upgrade to version 16
    migration_api.upgrade(16)

    cur_version = migration_api.db_version()
    self.assertEquals(16, cur_version)

    image_members_table = Table('image_members', MetaData(),
                                autoload=True, autoload_with=engine)

    # Fires when 'status' is missing, so the message says "not found".
    self.assertTrue('status' in image_members_table.c,
                    "'status' column not found in image_members table "
                    "columns! image_members table columns: %s"
                    % image_members_table.c.keys())

    conn = engine.connect()
    sel = select([func.count("*")], from_obj=[image_members_table])
    num_image_members = conn.execute(sel).scalar()
    self.assertEqual(orig_num_image_members + 1, num_image_members)

    now = datetime.datetime.now()
    inserter = image_members_table.insert()
    conn.execute(inserter, [
        {'deleted': False, 'created_at': now, 'member': 'fake-member',
         'updated_at': now, 'can_share': False, 'status': 'pending',
         'image_id': 'fake-image-id2'}])

    sel = select([func.count("*")], from_obj=[image_members_table])
    num_image_members = conn.execute(sel).scalar()
    self.assertEqual(orig_num_image_members + 2, num_image_members)
    conn.close()

    #Downgrade to version 15
    migration_api.downgrade(15)

    cur_version = migration_api.db_version()
    self.assertEquals(15, cur_version)

    image_members_table = Table('image_members', MetaData(),
                                autoload=True, autoload_with=engine)

    # Fires when 'status' is still present, so the message says "found".
    self.assertTrue('status' not in image_members_table.c,
                    "'status' column found in image_members table "
                    "columns! image_members table columns: %s"
                    % image_members_table.c.keys())

    conn = engine.connect()
    sel = select([func.count("*")], from_obj=[image_members_table])
    num_image_members = conn.execute(sel).scalar()
    self.assertEqual(orig_num_image_members + 2, num_image_members)
    conn.close()
def _no_data_loss_2_to_3_to_2(self, engine):
    """Upgrade 2->3 moves images.type into image_properties and
    downgrade 3->2 moves it back, without losing rows.

    FIX: the leading comment contradicted the assertion (version 2
    DOES have the type column), and the assertion failure messages
    were inverted — both corrected.
    """
    migration_api.version_control(version=0)
    migration_api.upgrade(2)

    cur_version = migration_api.db_version()
    self.assertEquals(2, cur_version)

    # We are now on version 2. Check that the images table
    # contains the type column...
    images_table = Table('images', MetaData(), autoload=True,
                         autoload_with=engine)
    image_properties_table = Table('image_properties', MetaData(),
                                   autoload=True, autoload_with=engine)
    # Fires when 'type' is missing, so the message says "not found".
    self.assertTrue(
        'type' in images_table.c,
        "'type' column not found in images table columns! "
        "images table columns: %s" % images_table.c.keys())

    conn = engine.connect()
    sel = select([func.count("*")], from_obj=[images_table])
    orig_num_images = conn.execute(sel).scalar()
    sel = select([func.count("*")], from_obj=[image_properties_table])
    orig_num_image_properties = conn.execute(sel).scalar()

    now = datetime.datetime.now()
    inserter = images_table.insert()
    conn.execute(inserter, [{
        'deleted': False,
        'created_at': now,
        'updated_at': now,
        'type': 'kernel',
        'status': 'active',
        'is_public': True
    }, {
        'deleted': False,
        'created_at': now,
        'updated_at': now,
        'type': 'ramdisk',
        'status': 'active',
        'is_public': True
    }])

    sel = select([func.count("*")], from_obj=[images_table])
    num_images = conn.execute(sel).scalar()
    self.assertEqual(orig_num_images + 2, num_images)
    conn.close()

    # Now let's upgrade to 3. This should move the type column
    # to the image_properties table as type properties.
    migration_api.upgrade(3)

    cur_version = migration_api.db_version()
    self.assertEquals(3, cur_version)

    images_table = Table('images', MetaData(), autoload=True,
                         autoload_with=engine)
    # Fires when 'type' is still present, so the message says "found".
    self.assertTrue(
        'type' not in images_table.c,
        "'type' column found in images table columns! "
        "images table columns reported by metadata: %s\n"
        % images_table.c.keys())

    image_properties_table = Table('image_properties', MetaData(),
                                   autoload=True, autoload_with=engine)
    conn = engine.connect()
    sel = select([func.count("*")], from_obj=[image_properties_table])
    num_image_properties = conn.execute(sel).scalar()
    self.assertEqual(orig_num_image_properties + 2, num_image_properties)
    conn.close()

    # Downgrade to 2 and check that the type properties were moved
    # to the main image table
    migration_api.downgrade(2)

    images_table = Table('images', MetaData(), autoload=True,
                         autoload_with=engine)
    # Fires when 'type' is missing, so the message says "not found".
    self.assertTrue(
        'type' in images_table.c,
        "'type' column not found in images table columns! "
        "images table columns: %s" % images_table.c.keys())

    image_properties_table = Table('image_properties', MetaData(),
                                   autoload=True, autoload_with=engine)
    conn = engine.connect()
    sel = select([func.count("*")], from_obj=[image_properties_table])
    last_num_image_properties = conn.execute(sel).scalar()
    self.assertEqual(num_image_properties - 2, last_num_image_properties)
def _no_data_loss_2_to_3_to_2(self, engine):
    """Upgrade 2->3 moves images.type into image_properties and
    downgrade 3->2 moves it back, without losing rows.

    FIX: the leading comment contradicted the assertion (version 2
    DOES have the type column), and the assertion failure messages
    were inverted — both corrected.
    """
    migration_api.version_control(version=0)
    migration_api.upgrade(2)

    cur_version = migration_api.db_version()
    self.assertEquals(2, cur_version)

    # We are now on version 2. Check that the images table
    # contains the type column...
    images_table = Table('images', MetaData(), autoload=True,
                         autoload_with=engine)
    image_properties_table = Table('image_properties', MetaData(),
                                   autoload=True, autoload_with=engine)
    # Fires when 'type' is missing, so the message says "not found".
    self.assertTrue('type' in images_table.c,
                    "'type' column not found in images table columns! "
                    "images table columns: %s" % images_table.c.keys())

    conn = engine.connect()
    sel = select([func.count("*")], from_obj=[images_table])
    orig_num_images = conn.execute(sel).scalar()
    sel = select([func.count("*")], from_obj=[image_properties_table])
    orig_num_image_properties = conn.execute(sel).scalar()

    now = datetime.datetime.now()
    inserter = images_table.insert()
    conn.execute(inserter, [
        {'deleted': False, 'created_at': now, 'updated_at': now,
         'type': 'kernel', 'status': 'active', 'is_public': True},
        {'deleted': False, 'created_at': now, 'updated_at': now,
         'type': 'ramdisk', 'status': 'active', 'is_public': True}])

    sel = select([func.count("*")], from_obj=[images_table])
    num_images = conn.execute(sel).scalar()
    self.assertEqual(orig_num_images + 2, num_images)
    conn.close()

    # Now let's upgrade to 3. This should move the type column
    # to the image_properties table as type properties.
    migration_api.upgrade(3)

    cur_version = migration_api.db_version()
    self.assertEquals(3, cur_version)

    images_table = Table('images', MetaData(), autoload=True,
                         autoload_with=engine)
    # Fires when 'type' is still present, so the message says "found".
    self.assertTrue('type' not in images_table.c,
                    "'type' column found in images table columns! "
                    "images table columns reported by metadata: %s\n"
                    % images_table.c.keys())

    image_properties_table = Table('image_properties', MetaData(),
                                   autoload=True, autoload_with=engine)
    conn = engine.connect()
    sel = select([func.count("*")], from_obj=[image_properties_table])
    num_image_properties = conn.execute(sel).scalar()
    self.assertEqual(orig_num_image_properties + 2, num_image_properties)
    conn.close()

    # Downgrade to 2 and check that the type properties were moved
    # to the main image table
    migration_api.downgrade(2)

    images_table = Table('images', MetaData(), autoload=True,
                         autoload_with=engine)
    # Fires when 'type' is missing, so the message says "not found".
    self.assertTrue('type' in images_table.c,
                    "'type' column not found in images table columns! "
                    "images table columns: %s" % images_table.c.keys())

    image_properties_table = Table('image_properties', MetaData(),
                                   autoload=True, autoload_with=engine)
    conn = engine.connect()
    sel = select([func.count("*")], from_obj=[image_properties_table])
    last_num_image_properties = conn.execute(sel).scalar()
    self.assertEqual(num_image_properties - 2, last_num_image_properties)