def downgrade(migrate_engine):
    """Remove the host and details columns from instance_actions_events.

    Drops the columns from both the live table and its shadow counterpart.
    """
    for table_name in ('instance_actions_events',
                       api._SHADOW_TABLE_PREFIX + 'instance_actions_events'):
        table = utils.get_table(migrate_engine, table_name)
        table.drop_column('host')
        table.drop_column('details')
def downgrade(migrate_engine):
    """Drop the 'host' and 'details' columns added by the matching upgrade.

    Applies to instance_actions_events and its shadow table.
    """
    events = utils.get_table(migrate_engine, 'instance_actions_events')
    events.drop_column('host')
    events.drop_column('details')
    shadow_name = api._SHADOW_TABLE_PREFIX + 'instance_actions_events'
    shadow_events = utils.get_table(migrate_engine, shadow_name)
    shadow_events.drop_column('host')
    shadow_events.drop_column('details')
def _post_downgrade_247(self, engine):
    """Verify nullability constraints were reverted by the 247 downgrade."""
    quota_usages = oslodbutils.get_table(engine, 'quota_usages')
    self.assertTrue(quota_usages.c.resource.nullable)
    pci_devices = oslodbutils.get_table(engine, 'pci_devices')
    # 'deleted' stays NOT NULL; the remaining columns become nullable again.
    self.assertFalse(pci_devices.c.deleted.nullable)
    for column_name in ('product_id', 'vendor_id', 'dev_type'):
        self.assertTrue(pci_devices.c[column_name].nullable)
def upgrade(migrate_engine):
    """Add 'host' (String) and 'details' (Text) columns.

    The columns are created on instance_actions_events and on its
    shadow table.
    """
    host_col = Column('host', String(255))
    details_col = Column('details', Text)
    events = utils.get_table(migrate_engine, 'instance_actions_events')
    events.create_column(host_col)
    events.create_column(details_col)
    shadow = utils.get_table(
        migrate_engine, api._SHADOW_TABLE_PREFIX + 'instance_actions_events')
    # A Column object may only belong to one table, so the shadow
    # table receives copies.
    shadow.create_column(host_col.copy())
    shadow.create_column(details_col.copy())
def upgrade(migrate_engine):
    """Create host and details columns on instance_actions_events and its
    shadow table."""
    new_columns = (Column('host', String(255)), Column('details', Text))
    actions_events = utils.get_table(migrate_engine, 'instance_actions_events')
    for col in new_columns:
        actions_events.create_column(col)
    shadow_table = utils.get_table(
        migrate_engine, api._SHADOW_TABLE_PREFIX + 'instance_actions_events')
    for col in new_columns:
        # copy() because a Column is bound to a single table once created.
        shadow_table.create_column(col.copy())
def _check_251(self, engine, data):
    """Verify migration 251 added numa_topology Text columns."""
    self.assertColumnExists(engine, 'compute_nodes', 'numa_topology')
    self.assertColumnExists(engine, 'shadow_compute_nodes', 'numa_topology')
    for table_name in ('compute_nodes', 'shadow_compute_nodes'):
        table = oslodbutils.get_table(engine, table_name)
        self.assertIsInstance(table.c.numa_topology.type,
                              sqlalchemy.types.Text)
def _check_251(self, engine, data):
    """Check that numa_topology exists with a Text type on both the live
    and shadow compute_nodes tables."""
    self.assertColumnExists(engine, 'compute_nodes', 'numa_topology')
    self.assertColumnExists(
        engine, 'shadow_compute_nodes', 'numa_topology')
    live_table = oslodbutils.get_table(engine, 'compute_nodes')
    shadow_table = oslodbutils.get_table(
        engine, 'shadow_compute_nodes')
    self.assertIsInstance(live_table.c.numa_topology.type,
                          sqlalchemy.types.Text)
    self.assertIsInstance(shadow_table.c.numa_topology.type,
                          sqlalchemy.types.Text)
def _check_249(self, engine, data):
    """After 249, exactly one index covers (instance_uuid, device_name)."""
    bdm = oslodbutils.get_table(engine, 'block_device_mapping')
    target_columns = ['instance_uuid', 'device_name']
    matching = [idx for idx in bdm.indexes
                if [col.name for col in idx.columns] == target_columns]
    self.assertEqual(1, len(matching))
def _check_231(self, engine, data):
    """Verify 231 added a String ephemeral_key_uuid column to instances
    and kept the shadow table in sync."""
    self.assertColumnExists(engine, 'instances', 'ephemeral_key_uuid')
    instances_table = oslodbutils.get_table(engine, 'instances')
    column_type = instances_table.c.ephemeral_key_uuid.type
    self.assertIsInstance(column_type, sqlalchemy.types.String)
    self.assertTrue(db_utils.check_shadow_table(engine, 'instances'))
def _post_downgrade_249(self, engine):
    """The duplicate index is not recreated on downgrade, so there must
    still be exactly one index over (instance_uuid, device_name)."""
    bdm = oslodbutils.get_table(engine, 'block_device_mapping')
    covering = [idx for idx in bdm.indexes
                if [col.name for col in idx.columns] ==
                ['instance_uuid', 'device_name']]
    self.assertEqual(1, len(covering))
def _post_downgrade_006(self, engine):
    """After downgrading 006, node 1 keeps its interface row and node 2's
    duplicate-address row is gone."""
    interfaces = oslodbutils.get_table(engine, 'bm_interfaces')
    node1_rows = (interfaces.select()
                  .where(interfaces.c.bm_node_id == 1)
                  .execute().fetchall())
    self.assertEqual(len(node1_rows), 1)
    self.assertEqual(node1_rows[0]['address'], 'bb:bb:bb:bb:bb:bb')
    node2_rows = (interfaces.select()
                  .where(interfaces.c.bm_node_id == 2)
                  .execute().fetchall())
    self.assertEqual(len(node2_rows), 0)
def _check_246(self, engine, data):
    """Migration 246 adds a foreign key on pci_devices.compute_node_id."""
    pci_devices = oslodbutils.get_table(engine, 'pci_devices')
    compute_node_fks = [fk for fk in pci_devices.foreign_keys
                        if fk.parent.name == 'compute_node_id']
    self.assertEqual(1, len(compute_node_fks))
def _post_downgrade_246(self, engine):
    """Downgrading 246 removes the compute_node_id foreign key."""
    pci_devices = oslodbutils.get_table(engine, 'pci_devices')
    compute_node_fks = [fk for fk in pci_devices.foreign_keys
                        if fk.parent.name == 'compute_node_id']
    self.assertEqual(0, len(compute_node_fks))
def _check_233(self, engine, data):
    """233 folds compute_node_stats into a Text 'stats' column."""
    self.assertColumnExists(engine, 'compute_nodes', 'stats')
    compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
    self.assertIsInstance(compute_nodes.c.stats.type, sqlalchemy.types.Text)
    # The old per-stat table must be gone after the migration.
    self.assertRaises(sqlalchemy.exc.NoSuchTableError,
                      oslodbutils.get_table, engine, 'compute_node_stats')
def _pre_upgrade_006(self, engine):
    """Seed bm_nodes/bm_interfaces fixtures for the 006 upgrade check."""
    nodes = oslodbutils.get_table(engine, 'bm_nodes')
    interfaces = oslodbutils.get_table(engine, 'bm_interfaces')
    fixtures = [
        # node 1: different addresses in bm_nodes and bm_interfaces
        (nodes, {'id': 1, 'prov_mac_address': 'aa:aa:aa:aa:aa:aa'}),
        (interfaces, {'id': 101, 'bm_node_id': 1,
                      'address': 'bb:bb:bb:bb:bb:bb'}),
        # node 2: identical address in both tables
        (nodes, {'id': 2, 'prov_mac_address': 'cc:cc:cc:cc:cc:cc'}),
        (interfaces, {'id': 201, 'bm_node_id': 2,
                      'address': 'cc:cc:cc:cc:cc:cc'}),
    ]
    for table, row in fixtures:
        engine.execute(table.insert(), [row])
def assertIndexMembers(self, engine, table, index, members):
    """Assert the named index exists and covers exactly *members*
    (order-insensitive)."""
    self.assertIndexExists(engine, table, index)
    table_obj = oslodbutils.get_table(engine, table)
    index_columns = next((idx.columns.keys() for idx in table_obj.indexes
                          if idx.name == index), None)
    self.assertEqual(sorted(members), sorted(index_columns))
def _check_010(self, engine, data):
    """preserve_ephemeral exists and defaults to False for the fixture row."""
    bm_nodes = oslodbutils.get_table(engine, 'bm_nodes')
    self.assertIn('preserve_ephemeral', bm_nodes.columns)
    query = sqlalchemy.select([bm_nodes.c.preserve_ephemeral]).where(
        bm_nodes.c.id == data[0]['id'])
    default = engine.execute(query).scalar()
    self.assertEqual(default, False)
    # Remove the fixture row inserted by _pre_upgrade_010.
    bm_nodes.delete().where(bm_nodes.c.id == data[0]['id']).execute()
def _check_010(self, engine, data):
    """Verify the preserve_ephemeral column was added with a False default,
    then clean up the fixture row."""
    bm_nodes = oslodbutils.get_table(engine, 'bm_nodes')
    self.assertIn('preserve_ephemeral', bm_nodes.columns)
    node_id = data[0]['id']
    default = engine.execute(
        sqlalchemy.select([bm_nodes.c.preserve_ephemeral])
        .where(bm_nodes.c.id == node_id)).scalar()
    self.assertEqual(default, False)
    bm_nodes.delete().where(bm_nodes.c.id == node_id).execute()
def _check_245(self, engine, data):
    """Verify column defaults on networks added by migration 245."""
    networks = oslodbutils.get_table(engine, 'networks')
    network = networks.select(networks.c.id == 1).execute().first()
    # mtu and dhcp_server both default to NULL
    self.assertIsNone(network.mtu)
    self.assertIsNone(network.dhcp_server)
    # enable_dhcp defaults to true, share_address to false
    self.assertTrue(network.enable_dhcp)
    self.assertFalse(network.share_address)
def _check_230(self, engine, data):
    """230 adds host/details to instance_actions_events and its shadow."""
    for table_name in ('instance_actions_events',
                       'shadow_instance_actions_events'):
        self.assertColumnExists(engine, table_name, 'host')
        self.assertColumnExists(engine, table_name, 'details')
    action_events = oslodbutils.get_table(engine, 'instance_actions_events')
    self.assertIsInstance(action_events.c.host.type,
                          sqlalchemy.types.String)
    self.assertIsInstance(action_events.c.details.type,
                          sqlalchemy.types.Text)
def _pre_upgrade_002(self, engine):
    """Insert a bm_deployments fixture row; return it for later checks."""
    row = {'id': 1,
           'key': 'fake-key',
           'image_path': '/dev/null',
           'pxe_config_path': '/dev/null/',
           'root_mb': 0,
           'swap_mb': 0}
    fixture = [row]
    deployments = oslodbutils.get_table(engine, 'bm_deployments')
    engine.execute(deployments.insert(), fixture)
    return fixture
def _check_006(self, engine, data):
    """After 006, node 1 has two interface rows and node 2 has one
    (with the shared address)."""
    interfaces = oslodbutils.get_table(engine, 'bm_interfaces')
    node1_rows = (interfaces.select()
                  .where(interfaces.c.bm_node_id == 1)
                  .execute()
                  .fetchall())
    self.assertEqual(len(node1_rows), 2)
    node2_rows = (interfaces.select()
                  .where(interfaces.c.bm_node_id == 2)
                  .execute()
                  .fetchall())
    self.assertEqual(len(node2_rows), 1)
    self.assertEqual(node2_rows[0]['address'], 'cc:cc:cc:cc:cc:cc')
def _check_227(self, engine, data):
    """Round-trip the longest resource name through project_user_quotas
    to confirm the widened column can hold it."""
    quotas_table = oslodbutils.get_table(engine, 'project_user_quotas')
    # Longest known resource name exercises the column width.
    fake_quotas = {'id': 5,
                   'project_id': 'fake_project',
                   'user_id': 'fake_user',
                   'resource': 'injected_file_content_bytes',
                   'hard_limit': 10}
    quotas_table.insert().execute(fake_quotas)
    stored = quotas_table.select(quotas_table.c.id == 5).execute().first()
    self.assertEqual(stored['resource'], 'injected_file_content_bytes')
def _post_downgrade_250(self, engine):
    """Both metadata tables must be restored on downgrade; get_table
    raises NoSuchTableError if either is missing."""
    for table_name in ('instance_group_metadata',
                       'shadow_instance_group_metadata'):
        oslodbutils.get_table(engine, table_name)
def _post_downgrade_248(self, engine):
    """The reservations_deleted_expire_idx index is dropped on downgrade."""
    reservations = oslodbutils.get_table(engine, 'reservations')
    existing_indexes = {idx.name for idx in reservations.indexes}
    self.assertNotIn('reservations_deleted_expire_idx', existing_indexes)
def _check_005(self, engine, data):
    """005 removes the prov_vlan_id and registration_status columns."""
    bm_nodes = oslodbutils.get_table(engine, 'bm_nodes')
    column_names = {c.name for c in bm_nodes.columns}
    for dropped in (u'prov_vlan_id', u'registration_status'):
        self.assertNotIn(dropped, column_names)
def _check_229(self, engine, data):
    """229 adds a Text extra_resources column to compute_nodes."""
    self.assertColumnExists(engine, 'compute_nodes', 'extra_resources')
    compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
    extra_resources_type = compute_nodes.c.extra_resources.type
    self.assertIsInstance(extra_resources_type, sqlalchemy.types.Text)
def _check_244(self, engine, data):
    """244 widens volume_usage_cache.user_id to 64 characters."""
    usage_cache = oslodbutils.get_table(engine, 'volume_usage_cache')
    self.assertEqual(64, usage_cache.c.user_id.type.length)
def _pre_upgrade_010(self, engine):
    """Insert a bm_nodes fixture row; return it for _check_010."""
    bm_nodes = oslodbutils.get_table(engine, 'bm_nodes')
    fixture = [{'id': 10, 'prov_mac_address': 'cc:cc:cc:cc:cc:cc'}]
    engine.execute(bm_nodes.insert(), fixture)
    return fixture
def _post_downgrade_004(self, engine):
    """The instance_name column is removed by the 004 downgrade."""
    bm_nodes = oslodbutils.get_table(engine, 'bm_nodes')
    column_names = [column.name for column in bm_nodes.columns]
    self.assertNotIn(u'instance_name', column_names)
def _pre_upgrade_002(self, engine):
    """Seed one bm_deployments row and hand it back to the framework."""
    deployment = {
        'id': 1,
        'key': 'fake-key',
        'image_path': '/dev/null',
        'pxe_config_path': '/dev/null/',
        'root_mb': 0,
        'swap_mb': 0,
    }
    data = [deployment]
    engine.execute(
        oslodbutils.get_table(engine, 'bm_deployments').insert(), data)
    return data
def assertIndexExists(self, engine, table, index):
    """Assert *table* has an index named *index*."""
    table_obj = oslodbutils.get_table(engine, table)
    self.assertIn(index, [idx.name for idx in table_obj.indexes])
def _check_007(self, engine, data):
    """007 drops the prov_mac_address column from bm_nodes."""
    bm_nodes = oslodbutils.get_table(engine, 'bm_nodes')
    self.assertNotIn(u'prov_mac_address',
                     [column.name for column in bm_nodes.columns])
def _post_downgrade_008(self, engine):
    """bm_pxe_ips must exist again; get_table raises if it does not."""
    oslodbutils.get_table(engine, 'bm_pxe_ips')
def _pre_upgrade_245(self, engine):
    """Insert a minimal network row so _check_245 can inspect defaults."""
    networks = oslodbutils.get_table(engine, 'networks')
    networks.insert().execute({'id': 1})
def _check_252(self, engine, data):
    """252 creates instance_extra (plus shadow) with an index on
    instance_uuid."""
    # get_table raises NoSuchTableError if either table was not created.
    oslodbutils.get_table(engine, 'instance_extra')
    oslodbutils.get_table(engine, 'shadow_instance_extra')
    self.assertIndexMembers(engine, 'instance_extra',
                            'instance_extra_idx', ['instance_uuid'])
def _post_downgrade_233(self, engine):
    """Downgrading 233 drops the stats column and restores
    compute_node_stats."""
    self.assertColumnNotExists(engine, 'compute_nodes', 'stats')
    # get_table raises NoSuchTableError if the table was not restored.
    oslodbutils.get_table(engine, 'compute_node_stats')
def _post_downgrade_244(self, engine):
    """Downgrading 244 restores user_id to its original 36-char width."""
    usage_cache = oslodbutils.get_table(engine, 'volume_usage_cache')
    self.assertEqual(36, usage_cache.c.user_id.type.length)
def _post_downgrade_244(self, engine):
    """volume_usage_cache.user_id shrinks back to 36 characters."""
    table = oslodbutils.get_table(
        engine, 'volume_usage_cache')
    self.assertEqual(36, table.c.user_id.type.length)
def _post_downgrade_010(self, engine):
    """The preserve_ephemeral column is removed by the 010 downgrade."""
    nodes_table = oslodbutils.get_table(engine, 'bm_nodes')
    self.assertNotIn('preserve_ephemeral', nodes_table.columns)
def assertColumnNotExists(self, engine, table, column):
    """Assert *table* has no column named *column*."""
    table_obj = oslodbutils.get_table(engine, table)
    self.assertNotIn(column, table_obj.c)