def _check_017(self, engine, data):
    """Test that added encryption information works correctly."""
    # encryption key UUID
    volumes = db_utils.get_table(engine, 'volumes')
    self.assertIn('encryption_key_id', volumes.c)
    self.assertIsInstance(volumes.c.encryption_key_id.type,
                          self.VARCHAR_TYPE)

    snapshots = db_utils.get_table(engine, 'snapshots')
    self.assertIn('encryption_key_id', snapshots.c)
    self.assertIsInstance(snapshots.c.encryption_key_id.type,
                          self.VARCHAR_TYPE)
    self.assertIn('volume_type_id', snapshots.c)
    self.assertIsInstance(snapshots.c.volume_type_id.type,
                          self.VARCHAR_TYPE)

    # encryption types table
    encryption = db_utils.get_table(engine, 'encryption')
    self.assertIsInstance(encryption.c.volume_type_id.type,
                          self.VARCHAR_TYPE)
    self.assertIsInstance(encryption.c.cipher.type, self.VARCHAR_TYPE)
    self.assertIsInstance(encryption.c.key_size.type, self.INTEGER_TYPE)
    self.assertIsInstance(encryption.c.provider.type, self.VARCHAR_TYPE)
def _check_075(self, engine, data):
    """Test adding cluster table and cluster_id fields."""
    self.assertTrue(engine.dialect.has_table(engine.connect(), 'clusters'))
    clusters = db_utils.get_table(engine, 'clusters')

    # Inherited fields from CinderBase
    self.assertIsInstance(clusters.c.created_at.type, self.TIME_TYPE)
    self.assertIsInstance(clusters.c.updated_at.type, self.TIME_TYPE)
    self.assertIsInstance(clusters.c.deleted_at.type, self.TIME_TYPE)
    self.assertIsInstance(clusters.c.deleted.type, self.BOOL_TYPE)

    # Cluster specific fields
    self.assertIsInstance(clusters.c.id.type, self.INTEGER_TYPE)
    self.assertIsInstance(clusters.c.name.type, self.VARCHAR_TYPE)
    self.assertIsInstance(clusters.c.binary.type, self.VARCHAR_TYPE)
    self.assertIsInstance(clusters.c.disabled.type, self.BOOL_TYPE)
    self.assertIsInstance(clusters.c.disabled_reason.type,
                          self.VARCHAR_TYPE)

    # Check that we have added cluster_name field to all required tables
    for table_name in ('services', 'consistencygroups', 'volumes'):
        table = db_utils.get_table(engine, table_name)
        self.assertIsInstance(table.c.cluster_name.type,
                              self.VARCHAR_TYPE)
def _check_578f84f38d(self, engine, data):
    nodes = db_utils.get_table(engine, 'nodes')
    col_names = [column.name for column in nodes.c]
    self.assertIn('uuid', col_names)
    self.assertIsInstance(nodes.c.uuid.type, sqlalchemy.types.String)
    self.assertIn('started_at', col_names)
    self.assertIsInstance(nodes.c.started_at.type, sqlalchemy.types.Float)
    self.assertIn('finished_at', col_names)
    self.assertIsInstance(nodes.c.finished_at.type, sqlalchemy.types.Float)
    self.assertIn('error', col_names)
    self.assertIsInstance(nodes.c.error.type, sqlalchemy.types.Text)

    attributes = db_utils.get_table(engine, 'attributes')
    col_names = [column.name for column in attributes.c]
    self.assertIn('uuid', col_names)
    self.assertIsInstance(attributes.c.uuid.type, sqlalchemy.types.String)
    self.assertIn('name', col_names)
    self.assertIsInstance(attributes.c.name.type, sqlalchemy.types.String)
    self.assertIn('value', col_names)
    self.assertIsInstance(attributes.c.value.type, sqlalchemy.types.String)

    options = db_utils.get_table(engine, 'options')
    col_names = [column.name for column in options.c]
    self.assertIn('uuid', col_names)
    self.assertIsInstance(options.c.uuid.type, sqlalchemy.types.String)
    self.assertIn('name', col_names)
    self.assertIsInstance(options.c.name.type, sqlalchemy.types.String)
    self.assertIn('value', col_names)
    self.assertIsInstance(options.c.value.type, sqlalchemy.types.Text)
def _check_487deb87cc9d(self, engine, data):
    conductors = db_utils.get_table(engine, 'conductors')
    column_names = [column.name for column in conductors.c]
    self.assertIn('online', column_names)
    self.assertIsInstance(conductors.c.online.type,
                          (sqlalchemy.types.Boolean,
                           sqlalchemy.types.Integer))

    nodes = db_utils.get_table(engine, 'nodes')
    column_names = [column.name for column in nodes.c]
    self.assertIn('conductor_affinity', column_names)
    self.assertIsInstance(nodes.c.conductor_affinity.type,
                          sqlalchemy.types.Integer)

    data_conductor = {'hostname': 'test_host'}
    conductors.insert().execute(data_conductor)
    conductor = conductors.select(
        conductors.c.hostname == data_conductor['hostname']
    ).execute().first()

    data_node = {'uuid': uuidutils.generate_uuid(),
                 'conductor_affinity': conductor['id']}
    nodes.insert().execute(data_node)
    node = nodes.select(
        nodes.c.uuid == data_node['uuid']).execute().first()
    self.assertEqual(conductor['id'], node['conductor_affinity'])
def _check_5ea1b0d310e(self, engine, data):
    portgroup = db_utils.get_table(engine, "portgroups")
    col_names = [column.name for column in portgroup.c]
    expected_names = ["created_at", "updated_at", "id", "uuid", "name",
                      "node_id", "address", "extra"]
    self.assertEqual(sorted(expected_names), sorted(col_names))

    self.assertIsInstance(portgroup.c.created_at.type,
                          sqlalchemy.types.DateTime)
    self.assertIsInstance(portgroup.c.updated_at.type,
                          sqlalchemy.types.DateTime)
    self.assertIsInstance(portgroup.c.id.type, sqlalchemy.types.Integer)
    self.assertIsInstance(portgroup.c.uuid.type, sqlalchemy.types.String)
    self.assertIsInstance(portgroup.c.name.type, sqlalchemy.types.String)
    self.assertIsInstance(portgroup.c.node_id.type,
                          sqlalchemy.types.Integer)
    self.assertIsInstance(portgroup.c.address.type,
                          sqlalchemy.types.String)
    self.assertIsInstance(portgroup.c.extra.type, sqlalchemy.types.TEXT)

    ports = db_utils.get_table(engine, "ports")
    col_names = [column.name for column in ports.c]
    self.assertIn("pxe_enabled", col_names)
    self.assertIn("portgroup_id", col_names)
    self.assertIn("local_link_connection", col_names)
    self.assertIsInstance(ports.c.portgroup_id.type,
                          sqlalchemy.types.Integer)
    # in some backends bool type is integer
    self.assertTrue(
        isinstance(ports.c.pxe_enabled.type, sqlalchemy.types.Boolean) or
        isinstance(ports.c.pxe_enabled.type, sqlalchemy.types.Integer))
def test_upgrade(self):
    self.assertTableDoesNotExist(self.identity_provider)
    self.assertTableDoesNotExist(self.federation_protocol)
    self.assertTableDoesNotExist(self.mapping)

    self.upgrade(1, repository=self.repo_path)
    self.assertTableColumns(self.identity_provider,
                            ['id', 'enabled', 'description'])
    self.assertTableColumns(self.federation_protocol,
                            ['id', 'idp_id', 'mapping_id'])

    self.upgrade(2, repository=self.repo_path)
    self.assertTableColumns(self.mapping, ['id', 'rules'])

    federation_protocol = utils.get_table(
        self.engine, 'federation_protocol')
    with self.engine.begin() as conn:
        conn.execute(federation_protocol.insert(), id=0, idp_id=1)

    self.upgrade(3, repository=self.repo_path)
    federation_protocol = utils.get_table(
        self.engine, 'federation_protocol')
    self.assertFalse(federation_protocol.c.mapping_id.nullable)
def _check_5ea1b0d310e(self, engine, data):
    portgroup = db_utils.get_table(engine, 'portgroups')
    col_names = [column.name for column in portgroup.c]
    expected_names = ['created_at', 'updated_at', 'id', 'uuid', 'name',
                      'node_id', 'address', 'extra']
    self.assertEqual(sorted(expected_names), sorted(col_names))

    self.assertIsInstance(portgroup.c.created_at.type,
                          sqlalchemy.types.DateTime)
    self.assertIsInstance(portgroup.c.updated_at.type,
                          sqlalchemy.types.DateTime)
    self.assertIsInstance(portgroup.c.id.type, sqlalchemy.types.Integer)
    self.assertIsInstance(portgroup.c.uuid.type, sqlalchemy.types.String)
    self.assertIsInstance(portgroup.c.name.type, sqlalchemy.types.String)
    self.assertIsInstance(portgroup.c.node_id.type,
                          sqlalchemy.types.Integer)
    self.assertIsInstance(portgroup.c.address.type,
                          sqlalchemy.types.String)
    self.assertIsInstance(portgroup.c.extra.type, sqlalchemy.types.TEXT)

    ports = db_utils.get_table(engine, 'ports')
    col_names = [column.name for column in ports.c]
    self.assertIn('pxe_enabled', col_names)
    self.assertIn('portgroup_id', col_names)
    self.assertIn('local_link_connection', col_names)
    self.assertIsInstance(ports.c.portgroup_id.type,
                          sqlalchemy.types.Integer)
    # in some backends bool type is integer
    self.assertIsInstance(ports.c.pxe_enabled.type,
                          (sqlalchemy.types.Boolean,
                           sqlalchemy.types.Integer))
def _check_267(self, engine, data):
    # Make sure the column is non-nullable and the UC exists.
    fixed_ips = oslodbutils.get_table(engine, 'fixed_ips')
    self.assertTrue(fixed_ips.c.instance_uuid.nullable)
    fixed_ip = fixed_ips.select(
        fixed_ips.c.id == 1).execute().first()
    self.assertIsNone(fixed_ip.instance_uuid)

    instances = oslodbutils.get_table(engine, 'instances')
    self.assertFalse(instances.c.uuid.nullable)

    inspector = reflection.Inspector.from_engine(engine)
    constraints = inspector.get_unique_constraints('instances')
    constraint_names = [constraint['name'] for constraint in constraints]
    self.assertIn('uniq_instances0uuid', constraint_names)

    # Make sure the instances record with the valid uuid is still there.
    instance = instances.select(instances.c.id == 1).execute().first()
    self.assertIsNotNone(instance)

    # Check that the null entry in the volumes table is still there since
    # we skipped tables that don't have FK's back to the instances table.
    volumes = oslodbutils.get_table(engine, 'volumes')
    self.assertTrue(volumes.c.instance_uuid.nullable)
    volume = volumes.select(
        volumes.c.id == '9c3c317e-24db-4d57-9a6f-96e6d477c1da'
    ).execute().first()
    self.assertIsNone(volume.instance_uuid)
def _check_08e1515a576c(self, engine, data):
    self.assertEqual("08e1515a576c",
                     api.get_backend().schema_revision(engine=engine))

    tasks = self._08e1515a576c_logs

    deployment_table = db_utils.get_table(engine, "deployments")
    task_table = db_utils.get_table(engine, "tasks")

    with engine.connect() as conn:
        tasks_found = conn.execute(task_table.select()).fetchall()
        for task in tasks_found:
            actual_log = json.loads(task.verification_log)
            self.assertIsInstance(actual_log, dict)
            expected = tasks[int(task.uuid)]["post"]
            for key in expected:
                self.assertEqual(expected[key], actual_log[key])

            conn.execute(
                task_table.delete().where(task_table.c.uuid == task.uuid))

        deployment_uuid = self._08e1515a576c_deployment_uuid
        conn.execute(
            deployment_table.delete().where(
                deployment_table.c.uuid == deployment_uuid)
        )
def _pre_upgrade_045(self, engine):
    raw_template = utils.get_table(engine, 'raw_template')
    templ = []
    for i in range(200, 203, 1):
        t = dict(id=i, template='{}', files='{}')
        engine.execute(raw_template.insert(), [t])
        templ.append(t)

    user_creds = utils.get_table(engine, 'user_creds')
    user = [dict(id=6, username='******', password='******',
                 tenant='mine', auth_url='bla',
                 tenant_id=str(uuid.uuid4()),
                 trust_id='', trustor_user_id='')]
    engine.execute(user_creds.insert(), user)

    stack = utils.get_table(engine, 'stack')
    stack_ids = [('s1', '967aaefb-152e-505d-b13a-35d4c816390c', 0),
                 ('s2', '9e9deba9-a303-5f29-84d3-c8165647c47e', 1),
                 ('s1*', '9a4bd1ec-8b21-56cd-964a-f66cb1cfa2f9', 2)]
    data = [dict(id=ll_id, name=name,
                 raw_template_id=templ[templ_id]['id'],
                 user_creds_id=user[0]['id'],
                 username='******', disable_rollback=True)
            for name, ll_id, templ_id in stack_ids]
    data[2]['owner_id'] = '967aaefb-152e-505d-b13a-35d4c816390c'

    engine.execute(stack.insert(), data)
    return data
def downgrade(self, migrate_engine):
    UniqueConstraint(
        "uuid",
        table=db_utils.get_table(migrate_engine, "instances"),
        name="uniq_instances0uuid"
    ).drop()
    for table_name in ("instances", "shadow_instances"):
        table = db_utils.get_table(migrate_engine, table_name)
        table.columns.uuid.alter(nullable=True)
def _check_057(self, engine, data):
    def uuid_in_res_data(res_uuid):
        for rd in data['resource']:
            if rd['id'] == res_uuid:
                return True
        return False

    def rd_matches_old_data(key, value, res_uuid):
        for rd in data['resource_data']:
            if (rd['resource_id'] == res_uuid and rd['key'] == key
                    and rd['value'] == value):
                return True
        return False

    self.assertColumnIsNotNullable(engine, 'resource', 'id')
    res_table = utils.get_table(engine, 'resource')
    res_in_db = list(res_table.select().execute())
    # confirm the resource.id is an int and the uuid field has been
    # copied from the old id.
    for r in res_in_db:
        self.assertIsInstance(r.id, six.integer_types)
        self.assertTrue(uuid_in_res_data(r.uuid))

    # confirm that the new resource_id points to the correct resource.
    rd_table = utils.get_table(engine, 'resource_data')
    rd_in_db = list(rd_table.select().execute())
    for rd in rd_in_db:
        for r in res_in_db:
            if rd.resource_id == r.id:
                self.assertTrue(rd_matches_old_data(rd.key, rd.value,
                                                    r.uuid))
def _pre_upgrade_056(self, engine):
    raw_template = utils.get_table(engine, 'raw_template')
    templ = []
    for i in range(900, 903, 1):
        t = dict(id=i, template='{}', files='{}')
        engine.execute(raw_template.insert(), [t])
        templ.append(t)

    user_creds = utils.get_table(engine, 'user_creds')
    user = [dict(id=uid, username='******', password='******',
                 tenant='test_project', auth_url='bla',
                 tenant_id=str(uuid.uuid4()),
                 trust_id='', trustor_user_id='')
            for uid in range(900, 903)]
    engine.execute(user_creds.insert(), user)

    stack = utils.get_table(engine, 'stack')
    stack_ids = [('967aaefa-152e-405d-b13a-35d4c816390c', 0),
                 ('9e9debab-a303-4f29-84d3-c8165647c47e', 1),
                 ('9a4bd1e9-8b21-46cd-964a-f66cb1cfa2f9', 2)]
    data = [dict(id=ll_id, name=ll_id,
                 raw_template_id=templ[templ_id]['id'],
                 user_creds_id=user[templ_id]['id'],
                 username='******', disable_rollback=True,
                 parameters='test_params',
                 created_at=timeutils.utcnow(),
                 deleted_at=None)
            for ll_id, templ_id in stack_ids]
    data[-1]['deleted_at'] = timeutils.utcnow()

    engine.execute(stack.insert(), data)
    return data
def _pre_upgrade_004(self, engine):
    """Change volume types to UUID."""
    data = {
        "volumes": [
            {"id": str(uuid.uuid4()), "host": "test1",
             "volume_type_id": 1},
            {"id": str(uuid.uuid4()), "host": "test2",
             "volume_type_id": 1},
            {"id": str(uuid.uuid4()), "host": "test3",
             "volume_type_id": 3},
        ],
        "volume_types": [{"name": "vtype1"},
                         {"name": "vtype2"},
                         {"name": "vtype3"}],
        "volume_type_extra_specs": [
            {"volume_type_id": 1, "key": "v1", "value": "hotep"},
            {"volume_type_id": 1, "key": "v2",
             "value": "bending rodrigez"},
            {"volume_type_id": 2, "key": "v3",
             "value": "bending rodrigez"},
        ],
    }

    volume_types = db_utils.get_table(engine, "volume_types")
    for vtype in data["volume_types"]:
        r = volume_types.insert().values(vtype).execute()
        vtype["id"] = r.inserted_primary_key[0]

    volume_type_es = db_utils.get_table(engine, "volume_type_extra_specs")
    for vtes in data["volume_type_extra_specs"]:
        r = volume_type_es.insert().values(vtes).execute()
        vtes["id"] = r.inserted_primary_key[0]

    volumes = db_utils.get_table(engine, "volumes")
    for vol in data["volumes"]:
        r = volumes.insert().values(vol).execute()
        vol["id"] = r.inserted_primary_key[0]

    return data
def downgrade(self, migrate_engine):
    UniqueConstraint('uuid',
                     table=db_utils.get_table(migrate_engine, 'instances'),
                     name='uniq_instances0uuid').drop()
    for table_name in ('instances', 'shadow_instances'):
        table = db_utils.get_table(migrate_engine, table_name)
        table.columns.uuid.alter(nullable=True)
def _check_253(self, engine, data):
    self.assertColumnExists(engine, "instance_extra", "pci_requests")
    self.assertColumnExists(engine, "shadow_instance_extra",
                            "pci_requests")

    instance_extra = oslodbutils.get_table(engine, "instance_extra")
    shadow_instance_extra = oslodbutils.get_table(
        engine, "shadow_instance_extra")
    self.assertIsInstance(instance_extra.c.pci_requests.type,
                          sqlalchemy.types.Text)
    self.assertIsInstance(shadow_instance_extra.c.pci_requests.type,
                          sqlalchemy.types.Text)
def _check_3b935b28e7a0(self, engine, data):
    subnets = db_utils.get_table(engine, 'ipamsubnets')
    pools = db_utils.get_table(engine, 'ipamallocationpools')
    allocations = db_utils.get_table(engine, 'ipamallocations')

    ipam_subnets = engine.execute(subnets.select()).fetchall()
    # Count of ipam subnets should match count of usual subnets
    self.assertEqual(len(data), len(ipam_subnets))
    neutron_to_ipam_id = {subnet.neutron_subnet_id: subnet.id
                          for subnet in ipam_subnets}
    for cidr in data:
        self.assertIn(data[cidr]['id'], neutron_to_ipam_id)
        ipam_subnet_id = neutron_to_ipam_id[data[cidr]['id']]

        # Validate ip allocations are migrated correctly
        ipam_allocations = engine.execute(allocations.select().where(
            allocations.c.ipam_subnet_id == ipam_subnet_id)).fetchall()
        for ipam_allocation in ipam_allocations:
            self.assertIn(ipam_allocation.ip_address,
                          data[cidr]['allocations'])
        self.assertEqual(len(data[cidr]['allocations']),
                         len(ipam_allocations))

        # Validate allocation pools are migrated correctly
        ipam_pools = engine.execute(pools.select().where(
            pools.c.ipam_subnet_id == ipam_subnet_id)).fetchall()
        # Convert to dict for easier lookup
        pool_dict = {pool.first_ip: pool.last_ip for pool in ipam_pools}
        for p in data[cidr]['pools']:
            self.assertIn(p['first_ip'], pool_dict)
            self.assertEqual(p['last_ip'], pool_dict[p['first_ip']])
        self.assertEqual(len(data[cidr]['pools']), len(ipam_pools))
def _check_6ad4f426f005(self, engine, data):
    self.assertEqual("6ad4f426f005",
                     api.get_backend().schema_revision(engine=engine))

    deployment_table = db_utils.get_table(engine, "deployments")
    task_table = db_utils.get_table(engine, "tasks")
    task_result_table = db_utils.get_table(engine, "task_results")

    with engine.connect() as conn:
        task_results = conn.execute(task_result_table.select()).fetchall()
        self.assertEqual(1, len(task_results))
        task_result = task_results[0]

        # check that "hooks" field added
        self.assertEqual({"hooks": []}, json.loads(task_result.data))

        # Remove task result
        conn.execute(
            task_result_table.delete().where(
                task_result_table.c.id == task_result.id)
        )

        # Remove task
        conn.execute(
            task_table.delete().where(task_table.c.uuid == "my_task"))

        # Remove deployment
        conn.execute(
            deployment_table.delete().where(
                deployment_table.c.uuid == "my_deployment")
        )
def _check_ee6d6ae007c1(self, engine, data):
    # NOTE(alaski): This is an example check. For this migration the
    # models sync check tests everything needed.
    tasks_table = sql_utils.get_table(engine, 'tasks')
    self.assertIsNotNone(tasks_table)
    task_items_table = sql_utils.get_table(engine, 'task_items')
    self.assertIsNotNone(task_items_table)
def downgrade(migrate_engine):
    # drop the unique constraint on instances.uuid
    UniqueConstraint("uuid",
                     table=utils.get_table(migrate_engine, "instances"),
                     name=UC_NAME).drop()
    # We can't bring the deleted records back but we can make uuid nullable.
    for table_name in ("instances", "shadow_instances"):
        table = utils.get_table(migrate_engine, table_name)
        table.columns.uuid.alter(nullable=True)
def _check_276(self, engine, data):
    self.assertColumnExists(engine, "instance_extra", "vcpu_model")
    self.assertColumnExists(engine, "shadow_instance_extra", "vcpu_model")

    instance_extra = oslodbutils.get_table(engine, "instance_extra")
    shadow_instance_extra = oslodbutils.get_table(
        engine, "shadow_instance_extra")
    self.assertIsInstance(instance_extra.c.vcpu_model.type,
                          sqlalchemy.types.Text)
    self.assertIsInstance(shadow_instance_extra.c.vcpu_model.type,
                          sqlalchemy.types.Text)
def _check_251(self, engine, data):
    self.assertColumnExists(engine, "compute_nodes", "numa_topology")
    self.assertColumnExists(engine, "shadow_compute_nodes",
                            "numa_topology")

    compute_nodes = oslodbutils.get_table(engine, "compute_nodes")
    shadow_compute_nodes = oslodbutils.get_table(engine,
                                                 "shadow_compute_nodes")
    self.assertIsInstance(compute_nodes.c.numa_topology.type,
                          sqlalchemy.types.Text)
    self.assertIsInstance(shadow_compute_nodes.c.numa_topology.type,
                          sqlalchemy.types.Text)
def _check_254(self, engine, data):
    self.assertColumnExists(engine, "pci_devices", "request_id")
    self.assertColumnExists(engine, "shadow_pci_devices", "request_id")

    pci_devices = oslodbutils.get_table(engine, "pci_devices")
    shadow_pci_devices = oslodbutils.get_table(engine,
                                               "shadow_pci_devices")
    self.assertIsInstance(pci_devices.c.request_id.type,
                          sqlalchemy.types.String)
    self.assertIsInstance(shadow_pci_devices.c.request_id.type,
                          sqlalchemy.types.String)
def _create_record_with_sa(engine, resource_type, attributes):
    """Create a record with standard attributes."""
    sa_table = db_utils.get_table(engine, 'standardattributes')
    sa_record = engine.execute(sa_table.insert().values(
        {'resource_type': resource_type}))
    attributes['standard_attr_id'] = sa_record.inserted_primary_key[0]
    resource_table = db_utils.get_table(engine, resource_type)
    engine.execute(resource_table.insert().values(attributes))
def downgrade(migrate_engine):
    actions_events = utils.get_table(migrate_engine,
                                     'instance_actions_events')
    actions_events.drop_column('host')
    actions_events.drop_column('details')
    shadow_actions_events = utils.get_table(
        migrate_engine,
        api._SHADOW_TABLE_PREFIX + 'instance_actions_events')
    shadow_actions_events.drop_column('host')
    shadow_actions_events.drop_column('details')
def _check_294(self, engine, data):
    self.assertColumnExists(engine, "services", "last_seen_up")
    self.assertColumnExists(engine, "shadow_services", "last_seen_up")

    services = oslodbutils.get_table(engine, "services")
    shadow_services = oslodbutils.get_table(engine, "shadow_services")
    self.assertIsInstance(services.c.last_seen_up.type,
                          sqlalchemy.types.DateTime)
    self.assertIsInstance(shadow_services.c.last_seen_up.type,
                          sqlalchemy.types.DateTime)
def _post_downgrade_017(self, engine):
    volumes = db_utils.get_table(engine, "volumes")
    self.assertNotIn("encryption_key_id", volumes.c)
    snapshots = db_utils.get_table(engine, "snapshots")
    self.assertNotIn("encryption_key_id", snapshots.c)
    self.assertFalse(engine.dialect.has_table(engine.connect(),
                                              "encryption"))
def _check_268(self, engine, data):
    # We can only assert that the col exists, not the unique constraint
    # as the engine is running sqlite
    self.assertColumnExists(engine, "compute_nodes", "host")
    self.assertColumnExists(engine, "shadow_compute_nodes", "host")

    compute_nodes = oslodbutils.get_table(engine, "compute_nodes")
    shadow_compute_nodes = oslodbutils.get_table(engine,
                                                 "shadow_compute_nodes")
    self.assertIsInstance(compute_nodes.c.host.type,
                          sqlalchemy.types.String)
    self.assertIsInstance(shadow_compute_nodes.c.host.type,
                          sqlalchemy.types.String)
def _check_247(self, engine, data):
    quota_usages = oslodbutils.get_table(engine, "quota_usages")
    self.assertFalse(quota_usages.c.resource.nullable)

    pci_devices = oslodbutils.get_table(engine, "pci_devices")
    self.assertTrue(pci_devices.c.deleted.nullable)
    self.assertFalse(pci_devices.c.product_id.nullable)
    self.assertFalse(pci_devices.c.vendor_id.nullable)
    self.assertFalse(pci_devices.c.dev_type.nullable)
def _post_downgrade_017(self, engine):
    volumes = db_utils.get_table(engine, 'volumes')
    self.assertNotIn('encryption_key_id', volumes.c)
    snapshots = db_utils.get_table(engine, 'snapshots')
    self.assertNotIn('encryption_key_id', snapshots.c)
    self.assertFalse(engine.dialect.has_table(engine.connect(),
                                              'encryption'))
def test_upgrade_dirty_instance_groups(self):
    igs = db_utils.get_table(self.engine, 'instance_groups')
    igs.insert().execute(self.ig_values)
    self.assertRaises(exception.ValidationError,
                      self.migration.upgrade, self.engine)
def _create_host_mapping(self, **values):
    mappings = db_utils.get_table(self.engine, 'host_mappings')
    return mappings.insert().execute(**values).inserted_primary_key[0]
def test_upgrade_with_deleted_instance_groups(self):
    igs = db_utils.get_table(self.engine, 'instance_groups')
    group_id = igs.insert().execute(
        self.ig_values).inserted_primary_key[0]
    igs.update().where(igs.c.id == group_id).values(
        deleted=group_id).execute()
    self.migration.upgrade(self.engine)
def assertColumnNotExists(self, engine, table, column):
    t = db_utils.get_table(engine, table)
    self.assertNotIn(column, t.c)
def assertColumnCount(self, engine, table, columns):
    t = db_utils.get_table(engine, table)
    self.assertEqual(len(t.columns), len(columns))
def _check_242cc6a923b3(self, engine, data):
    nodes = db_utils.get_table(engine, 'nodes')
    col_names = [column.name for column in nodes.c]
    self.assertIn('maintenance_reason', col_names)
    self.assertIsInstance(nodes.c.maintenance_reason.type,
                          sqlalchemy.types.String)
def assertIndexExists(self, engine, table, index):
    t = db_utils.get_table(engine, table)
    index_names = [idx.name for idx in t.indexes]
    self.assertIn(index, index_names)
def upgrade(migrate_engine):
    stored_file = utils.get_table(migrate_engine, 'stored_file')
    registry_path = Column('registry_path', String(256), nullable=True)
    stored_file.create_column(registry_path)
def _check_28c44432c9c3(self, engine, data):
    nodes_tbl = db_utils.get_table(engine, 'nodes')
    col_names = [column.name for column in nodes_tbl.c]
    self.assertIn('description', col_names)
    self.assertIsInstance(nodes_tbl.c.description.type,
                          sqlalchemy.types.TEXT)
def _check_229(self, engine, data):
    self.assertColumnExists(engine, 'compute_nodes', 'extra_resources')

    compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
    self.assertIsInstance(compute_nodes.c.extra_resources.type,
                          sqlalchemy.types.Text)
def _check_f190f9d00a11(self, engine, data):
    nodes = db_utils.get_table(engine, 'nodes')
    col_names = [column.name for column in nodes.c]
    self.assertIn('owner', col_names)
def _check_244(self, engine, data):
    volume_usage_cache = oslodbutils.get_table(
        engine, 'volume_usage_cache')
    self.assertEqual(64, volume_usage_cache.c.user_id.type.length)
def _check_21b331f883ef(self, engine, data):
    nodes = db_utils.get_table(engine, 'nodes')
    col_names = [column.name for column in nodes.c]
    self.assertIn('provision_updated_at', col_names)
    self.assertIsInstance(nodes.c.provision_updated_at.type,
                          sqlalchemy.types.DateTime)
def _pre_upgrade_245(self, engine):
    # create a fake network
    networks = oslodbutils.get_table(engine, 'networks')
    fake_network = {'id': 1}
    networks.insert().execute(fake_network)
def _check_fb3f10dd262e(self, engine, data):
    nodes_tbl = db_utils.get_table(engine, 'nodes')
    col_names = [column.name for column in nodes_tbl.c]
    self.assertIn('fault', col_names)
    self.assertIsInstance(nodes_tbl.c.fault.type,
                          sqlalchemy.types.String)
def _check_252(self, engine, data):
    oslodbutils.get_table(engine, 'instance_extra')
    oslodbutils.get_table(engine, 'shadow_instance_extra')
    self.assertIndexMembers(engine, 'instance_extra',
                            'instance_extra_idx', ['instance_uuid'])
def _check_005(self, engine, data):
    failover_segments = oslodbutils.get_table(engine, 'failover_segments')
    hosts = oslodbutils.get_table(engine, 'hosts')

    for table in [failover_segments, hosts]:
        self.assertTrue(table.c.created_at.nullable)
def _pre_upgrade_293(self, engine):
    migrations = oslodbutils.get_table(engine, 'migrations')
    fake_migration = {}
    migrations.insert().execute(fake_migration)
def _check_278(self, engine, data):
    compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
    self.assertEqual(0, len([fk for fk in compute_nodes.foreign_keys
                             if fk.parent.name == 'service_id']))
    self.assertTrue(compute_nodes.c.service_id.nullable)
def _check_31baaf680d2b(self, engine, data):
    nodes = db_utils.get_table(engine, 'nodes')
    col_names = [column.name for column in nodes.c]
    self.assertIn('instance_info', col_names)
    self.assertIsInstance(nodes.c.instance_info.type,
                          sqlalchemy.types.TEXT)
def _check_d2b036ae9378(self, engine, data):
    nodes = db_utils.get_table(engine, 'nodes')
    col_names = [column.name for column in nodes.c]
    self.assertIn('automated_clean', col_names)
def _check_b9117ac17882(self, engine, data):
    nodes = db_utils.get_table(engine, 'nodes')
    col_names = [column.name for column in nodes.c]
    self.assertIn('deploy_step', col_names)
    self.assertIsInstance(nodes.c.deploy_step.type,
                          sqlalchemy.types.String)
def _check_246(self, engine, data):
    pci_devices = oslodbutils.get_table(engine, 'pci_devices')
    self.assertEqual(1, len([fk for fk in pci_devices.foreign_keys
                             if fk.parent.name == 'compute_node_id']))
def test_upgrade_dirty_flavors(self):
    flavors = db_utils.get_table(self.engine, 'instance_types')
    flavors.insert().execute(self.flavor_values)
    self.assertRaises(exception.ValidationError,
                      self.migration.upgrade, self.engine)
def _pre_upgrade_54e844ebfbc3(self, engine):
    self._54e844ebfbc3_deployments = {
        # right config which should not be changed after migration
        "should-not-be-changed-1": {
            "admin": {"username": "******",
                      "password": "******",
                      "project_name": "admin"},
            "auth_url": "http://example.com:5000/v3",
            "region_name": "RegionOne",
            "type": "ExistingCloud"},
        # right config which should not be changed after migration
        "should-not-be-changed-2": {
            "admin": {"username": "******",
                      "password": "******",
                      "tenant_name": "admin"},
            "users": [{"username": "******",
                       "password": "******",
                       "tenant_name": "admin"}],
            "auth_url": "http://example.com:5000/v2.0",
            "region_name": "RegionOne",
            "type": "ExistingCloud"},
        # not ExistingCloud config which should not be changed
        "should-not-be-changed-3": {
            "url": "example.com",
            "type": "Something"},
        # normal config created with "fromenv" feature
        "from-env": {
            "admin": {"username": "******",
                      "password": "******",
                      "tenant_name": "admin",
                      "project_domain_name": "",
                      "user_domain_name": ""},
            "auth_url": "http://example.com:5000/v2.0",
            "region_name": "RegionOne",
            "type": "ExistingCloud"},
        # public endpoint + keystone v3 config with tenant_name
        "ksv3_public": {
            "admin": {"username": "******",
                      "password": "******",
                      "tenant_name": "admin",
                      "user_domain_name": "bla",
                      "project_domain_name": "foo"},
            "auth_url": "http://example.com:5000/v3",
            "region_name": "RegionOne",
            "type": "ExistingCloud",
            "endpoint_type": "public"},
        # internal endpoint + existing_users
        "existing_internal": {
            "admin": {"username": "******",
                      "password": "******",
                      "tenant_name": "admin"},
            "users": [{"username": "******",
                       "password": "******",
                       "tenant_name": "admin",
                       "project_domain_name": "",
                       "user_domain_name": ""}],
            "auth_url": "http://example.com:5000/v2.0",
            "region_name": "RegionOne",
            "type": "ExistingCloud",
            "endpoint_type": "internal"}
    }

    deployment_table = db_utils.get_table(engine, "deployments")
    deployment_status = consts.DeployStatus.DEPLOY_FINISHED

    with engine.connect() as conn:
        for deployment in self._54e844ebfbc3_deployments:
            conf = json.dumps(self._54e844ebfbc3_deployments[deployment])
            conn.execute(
                deployment_table.insert(),
                [{"uuid": deployment, "name": deployment,
                  "config": conf,
                  "enum_deployments_status": deployment_status,
                  "credentials": six.b(json.dumps([])),
                  "users": six.b(json.dumps([]))
                  }])
def assertColumnType(self, engine, table, column, sqltype):
    t = db_utils.get_table(engine, table)
    col = getattr(t.c, column)
    self.assertIsInstance(col.type, sqltype)
def _check_405cfe08f18d(self, engine, data):
    nodes = db_utils.get_table(engine, 'nodes')
    col_names = [column.name for column in nodes.c]
    self.assertIn('rescue_interface', col_names)
    self.assertIsInstance(nodes.c.rescue_interface.type,
                          sqlalchemy.types.String)
def _check_280(self, engine, data):
    key_pairs = oslodbutils.get_table(engine, 'key_pairs')
    self.assertFalse(key_pairs.c.name.nullable)
def _check_bb59b63f55a(self, engine, data):
    nodes = db_utils.get_table(engine, 'nodes')
    col_names = [column.name for column in nodes.c]
    self.assertIn('driver_internal_info', col_names)
    self.assertIsInstance(nodes.c.driver_internal_info.type,
                          sqlalchemy.types.TEXT)
def _check_2d13bc3d6bba(self, engine, data):
    nodes = db_utils.get_table(engine, 'nodes')
    col_names = [column.name for column in nodes.c]
    self.assertIn('bios_interface', col_names)
    self.assertIsInstance(nodes.c.bios_interface.type,
                          sqlalchemy.types.String)