def _check_ocata_expand01(self, engine, data):
    """Verify the ocata data migration populates 'visibility'.

    Before the migration runs, the expand phase has added the new
    'visibility' column but left it NULL; afterwards every row must
    carry the visibility derived from its legacy 'is_public' flag and
    membership state.
    """
    images = db_utils.get_table(engine, 'images')

    # check that visibility is null for existing images.
    # NOTE: use an explicit connection — the legacy implicit-execution
    # style (select().execute()) was removed in SQLAlchemy 2.0 and the
    # other checks in this file already use conn.execute().
    with engine.connect() as conn:
        rows = conn.execute(
            images.select().order_by(images.c.id)
        ).fetchall()
    self.assertEqual(4, len(rows))
    for row in rows:
        self.assertIsNone(row['visibility'])

    # run data migrations
    data_migrations.migrate(engine)

    # check that visibility is set appropriately for all images
    with engine.connect() as conn:
        rows = conn.execute(
            images.select().order_by(images.c.id)
        ).fetchall()
    self.assertEqual(4, len(rows))

    # private_id_1 has private visibility
    self.assertEqual('private_id_1', rows[0]['id'])
    self.assertEqual('private', rows[0]['visibility'])

    # private_id_2 has private visibility
    self.assertEqual('private_id_2', rows[1]['id'])
    self.assertEqual('private', rows[1]['visibility'])

    # public_id has public visibility
    self.assertEqual('public_id', rows[2]['id'])
    self.assertEqual('public', rows[2]['visibility'])

    # shared_id has shared visibility
    self.assertEqual('shared_id', rows[3]['id'])
    self.assertEqual('shared', rows[3]['visibility'])
def _pre_upgrade_ocata_contract01(self, engine):
    """Seed the images table before the ocata contract migration.

    Inserts one public and one private image through the pre-contract
    schema (which still exposes both 'is_public' and a nullable
    'visibility'), then runs the ocata data migrations so the contract
    step has consistent data to operate on.
    """
    images = db_utils.get_table(engine, 'images')
    now = datetime.datetime.now()

    # The expand phase must have left both columns present and nullable.
    self.assertIn('is_public', images.c)
    self.assertIn('visibility', images.c)
    self.assertTrue(images.c.is_public.nullable)
    self.assertTrue(images.c.visibility.nullable)

    # inserting a public image record
    public_temp = dict(deleted=False,
                       created_at=now,
                       status='active',
                       is_public=True,
                       min_disk=0,
                       min_ram=0,
                       id='public_id_before_expand')
    with engine.connect() as conn, conn.begin():
        conn.execute(images.insert().values(public_temp))

    # inserting a private image record
    # (was misleadingly named 'shared_temp'; the row is private)
    private_temp = dict(deleted=False,
                        created_at=now,
                        status='active',
                        is_public=False,
                        min_disk=0,
                        min_ram=0,
                        id='private_id_before_expand')
    with engine.connect() as conn, conn.begin():
        conn.execute(images.insert().values(private_temp))

    data_migrations.migrate(engine=engine, release='ocata')
def _check_ocata_expand01(self, engine, data):
    """Verify the ocata data migration runs and preserves row identity.

    The visibility-value assertions are temporarily disabled pending
    bug #1745003; only the row ids (and the pre-migration NULL state)
    are checked here.
    """
    images = db_utils.get_table(engine, 'images')

    # check that visibility is null for existing images.
    # NOTE: explicit connection instead of the legacy implicit-execution
    # select().execute(), which was removed in SQLAlchemy 2.0.
    with engine.connect() as conn:
        rows = conn.execute(
            images.select().order_by(images.c.id)
        ).fetchall()
    self.assertEqual(4, len(rows))
    for row in rows:
        self.assertIsNone(row['visibility'])

    # run data migrations
    data_migrations.migrate(engine)

    # check that visibility is set appropriately for all images
    with engine.connect() as conn:
        rows = conn.execute(
            images.select().order_by(images.c.id)
        ).fetchall()
    self.assertEqual(4, len(rows))

    # private_id_1 has private visibility
    self.assertEqual('private_id_1', rows[0]['id'])
    # TODO(rosmaita): bug #1745003
    # self.assertEqual('private', rows[0]['visibility'])

    # private_id_2 has private visibility
    self.assertEqual('private_id_2', rows[1]['id'])
    # TODO(rosmaita): bug #1745003
    # self.assertEqual('private', rows[1]['visibility'])

    # public_id has public visibility
    self.assertEqual('public_id', rows[2]['id'])
    # TODO(rosmaita): bug #1745003
    # self.assertEqual('public', rows[2]['visibility'])

    # shared_id has shared visibility
    self.assertEqual('shared_id', rows[3]['id'])
def _pre_upgrade_ocata_contract01(self, engine):
    """Seed the images table before the ocata contract migration.

    Inserts one public and one private image via the pre-contract
    schema, then runs the ocata data migrations.
    """
    images = db_utils.get_table(engine, 'images')
    now = datetime.datetime.now()

    # The expand phase must have left both columns present and nullable.
    self.assertIn('is_public', images.c)
    self.assertIn('visibility', images.c)
    self.assertTrue(images.c.is_public.nullable)
    self.assertTrue(images.c.visibility.nullable)

    # inserting a public image record.
    # NOTE: inserts go through an explicit connection/transaction —
    # the legacy implicit-execution form insert().values(...).execute()
    # was removed in SQLAlchemy 2.0.
    public_temp = dict(deleted=False,
                       created_at=now,
                       status='active',
                       is_public=True,
                       min_disk=0,
                       min_ram=0,
                       id='public_id_before_expand')
    with engine.connect() as conn, conn.begin():
        conn.execute(images.insert().values(public_temp))

    # inserting a private image record
    # (was misleadingly named 'shared_temp'; the row is private)
    private_temp = dict(deleted=False,
                        created_at=now,
                        status='active',
                        is_public=False,
                        min_disk=0,
                        min_ram=0,
                        id='private_id_before_expand')
    with engine.connect() as conn, conn.begin():
        conn.execute(images.insert().values(private_temp))

    data_migrations.migrate(engine=engine, release='ocata')
def _check_train_expand01(self, engine, data):
    """Verify the train data migration renames 'backend' to 'store'.

    Before the migration each image_locations row's meta_data JSON
    contains a "backend" key; afterwards that key must be gone and a
    "store" key present instead.
    """
    image_locations = db_utils.get_table(engine, 'image_locations')

    def _fetch_locations():
        # All rows, ordered by id, via an explicit connection.
        with engine.connect() as conn:
            return conn.execute(
                image_locations.select().order_by(image_locations.c.id)
            ).fetchall()

    # check that meta_data has 'backend' key for existing image_locations
    locations = _fetch_locations()
    self.assertEqual(2, len(locations))
    for location in locations:
        self.assertIn('"backend":', location['meta_data'])

    # run data migrations
    data_migrations.migrate(engine, release='train')

    # check that meta_data has 'backend' key replaced with 'store'
    locations = _fetch_locations()
    self.assertEqual(2, len(locations))
    for location in locations:
        self.assertNotIn('"backend":', location['meta_data'])
        self.assertIn('"store":', location['meta_data'])
def _check_train_expand01(self, engine, data):
    """Check the images table is empty, then run the data migrations.

    This variant exercises the migration path when there is no data
    to migrate.
    """
    images = db_utils.get_table(engine, 'images')

    # check that there are no rows in the images table.
    # NOTE: explicit connection instead of the legacy implicit-execution
    # select().execute(), removed in SQLAlchemy 2.0.
    with engine.connect() as conn:
        rows = conn.execute(
            images.select().order_by(images.c.id)
        ).fetchall()
    self.assertEqual(0, len(rows))

    # run data migrations
    data_migrations.migrate(engine)
def _check_ocata_expand01(self, engine, data):
    """Confirm the images table is empty, then run the data migrations.

    Exercises the migration path for a database with no image rows.
    """
    images = db_utils.get_table(engine, 'images')

    # the images table must contain no rows before migrating
    with engine.connect() as conn:
        query = images.select().order_by(images.c.id)
        records = conn.execute(query).fetchall()
    self.assertEqual(0, len(records))

    # run data migrations
    data_migrations.migrate(engine)
def _check_ocata_expand01(self, engine, data):
    """Check the images table is empty, then run the data migrations.

    Exercises the migration path when there is no data to migrate.
    """
    images = db_utils.get_table(engine, 'images')

    # check that there are no rows in the images table.
    # NOTE: explicit connection instead of the legacy implicit-execution
    # select().execute(), removed in SQLAlchemy 2.0.
    with engine.connect() as conn:
        rows = conn.execute(
            images.select().order_by(images.c.id)
        ).fetchall()
    self.assertEqual(0, len(rows))

    # run data migrations
    data_migrations.migrate(engine)
def migrate(self, online_migration=True):
    """Run the data migration phase of a database migration."""
    if online_migration:
        self._validate_engine(db_api.get_engine())

    heads = alembic_migrations.get_current_alembic_heads()

    # A database already at the contract head needs nothing further.
    contract = alembic_migrations.get_alembic_branch_head(
        db_migration.CONTRACT_BRANCH)
    if contract in heads:
        print(_('Database is up to date. No migrations needed.'))
        sys.exit()

    # Data migrations may only run once the expand phase has completed.
    expand = alembic_migrations.get_alembic_branch_head(
        db_migration.EXPAND_BRANCH)
    if expand not in heads:
        sys.exit(
            _('Data migration did not run. Data migration cannot be '
              'run before database expansion. Run database '
              'expansion first using "glance-manage db expand"'))

    if data_migrations.has_pending_migrations(db_api.get_engine()):
        rows_migrated = data_migrations.migrate(db_api.get_engine())
        print(_('Migrated %s rows') % rows_migrated)
    else:
        print(_('Database migration is up to date. No migration needed.'))
def test_migrate(self, mock_iter, mock_import):
    """Only the 'zebra' modules are imported and their row counts summed."""
    def fake_iter_modules(blah):
        # Five discovered module names; only the two zebra ones match.
        for module_name in ('zebra01', 'zebra02', 'yellow01',
                            'xray01', 'xray02'):
            yield 'blah', module_name, 'blah'

    mock_iter.side_effect = fake_iter_modules

    # Two fake migration modules, each reporting pending migrations
    # and a distinct migrated-row count.
    fake_imported_modules = []
    for row_count in (100, 50):
        module = mock.Mock()
        module.has_migrations.return_value = True
        module.migrate.return_value = row_count
        fake_imported_modules.append(module)
    mock_import.side_effect = fake_imported_modules

    engine = mock.Mock()
    actual = data_migrations.migrate(engine, 'zebra')

    # 100 + 50 rows across the two matching modules.
    self.assertEqual(150, actual)
    for module in fake_imported_modules:
        module.has_migrations.assert_called_once_with(engine)
        module.migrate.assert_called_once_with(engine)
def test_migrate(self, mock_iter, mock_import):
    """Only the 'ocata' modules are imported and their row counts summed."""
    def fake_iter_modules(blah):
        # Five discovered module names; only the two ocata ones match.
        for module_name in ('ocata01', 'ocata02', 'pike01',
                            'newton', 'mitaka456'):
            yield 'blah', module_name, 'blah'

    mock_iter.side_effect = fake_iter_modules

    # Two fake migration modules, each reporting pending migrations
    # and a distinct migrated-row count.
    fake_imported_modules = []
    for row_count in (100, 50):
        module = mock.Mock()
        module.has_migrations.return_value = True
        module.migrate.return_value = row_count
        fake_imported_modules.append(module)
    mock_import.side_effect = fake_imported_modules

    engine = mock.Mock()
    actual = data_migrations.migrate(engine)

    # 100 + 50 rows across the two matching modules.
    self.assertEqual(150, actual)
    for module in fake_imported_modules:
        module.has_migrations.assert_called_once_with(engine)
        module.migrate.assert_called_once_with(engine)
def migrate(self):
    """Run the data migration phase of a rolling database upgrade.

    Exits with an error if the backend is not MySQL or if the expand
    phase has not yet been applied; otherwise runs the data migrations
    and reports how many rows were migrated.
    """
    engine = db_api.get_engine()
    if engine.engine.name != 'mysql':
        sys.exit(
            _('Rolling upgrades are currently supported only for '
              'MySQL'))

    curr_heads = alembic_migrations.get_current_alembic_heads()
    expand_head = alembic_migrations.get_alembic_branch_head(
        db_migration.EXPAND_BRANCH)
    # Data migrations may only run once the expand phase has completed.
    if expand_head not in curr_heads:
        sys.exit(
            _('Data migration did not run. Data migration cannot be '
              'run before database expansion. Run database '
              'expansion first using "glance-manage db expand"'))

    # Reuse the engine already obtained above rather than calling
    # db_api.get_engine() a second time.
    rows_migrated = data_migrations.migrate(engine)
    print(_('Migrated %s rows') % rows_migrated)