def setUp(self):
    """Load the 291 flavor-enforcement migration and a DB engine."""
    super(TestFlavorCheck, self).setUp()
    self.context = context.get_admin_context()
    # The migration module name starts with a digit, so it has to be
    # loaded by dotted path rather than a normal import statement.
    module_path = ('nova.db.sqlalchemy.migrate_repo.versions.'
                   '291_enforce_flavors_migrated')
    self.migration = importlib.import_module(module_path)
    self.engine = db_api.get_engine()
def test_fixture_reset(self):
    """Re-applying the Database fixture resets the in-memory DB.

    Inserts an extra row into instance_types, applies the fixture
    again, and verifies the table is back to the default five rows.
    """
    # because this sets up reasonable db connection strings
    self.useFixture(conf_fixture.ConfFixture())
    self.useFixture(fixtures.Database())
    engine = session.get_engine()
    conn = engine.connect()
    result = conn.execute("select * from instance_types")
    rows = result.fetchall()
    # assertEqual takes (expected, observed); expected goes first so
    # failure messages read correctly.
    self.assertEqual(5, len(rows), "Rows %s" % rows)

    # insert a 6th instance type, column 5 below is an int id
    # which has a constraint on it, so if new standard instance
    # types are added you have to bump it.
    conn.execute("insert into instance_types VALUES "
                 "(NULL, NULL, NULL, 't1.test', 6, 4096, 2, 0, NULL, '87'"
                 ", 1.0, 40, 0, 0, 1, 0)")
    result = conn.execute("select * from instance_types")
    rows = result.fetchall()
    self.assertEqual(6, len(rows), "Rows %s" % rows)

    # reset by invoking the fixture again
    #
    # NOTE(sdague): it's important to reestablish the db
    # connection because otherwise we have a reference to the old
    # in mem db.
    self.useFixture(fixtures.Database())
    conn = engine.connect()
    result = conn.execute("select * from instance_types")
    rows = result.fetchall()
    self.assertEqual(5, len(rows), "Rows %s" % rows)
def get_engine(database='main'):
    """Return the SQLAlchemy engine for the named database.

    :param database: one of 'main', 'api' or 'placement'; any other
        value falls through and returns None.
    """
    getters = {
        'main': db_session.get_engine,
        'api': db_session.get_api_engine,
        'placement': db_session.get_placement_engine,
    }
    getter = getters.get(database)
    if getter is not None:
        return getter()
def get_engine(database='main', context=None):
    """Return the SQLAlchemy engine for the named database.

    :param database: 'main', 'api' or 'placement'; unknown names fall
        through and return None.
    :param context: request context, used only to route the 'main'
        database lookup.
    """
    if database == 'api':
        return db_session.get_api_engine()
    if database == 'placement':
        return placement_db.get_placement_engine()
    if database == 'main':
        return db_session.get_engine(context=context)
def test_fixture_reset(self):
    """Applying the Database fixture a second time resets the DB."""
    # because this sets up reasonable db connection strings
    self.useFixture(conf_fixture.ConfFixture())
    self.useFixture(fixtures.Database())
    engine = session.get_engine()

    def _flavor_rows(connection):
        # Fetch every instance_types row for counting and reporting.
        return connection.execute(
            "select * from instance_types").fetchall()

    conn = engine.connect()
    rows = _flavor_rows(conn)
    self.assertEqual(5, len(rows), "Rows %s" % rows)
    # insert a 6th instance type, column 5 below is an int id
    # which has a constraint on it, so if new standard instance
    # types are added you have to bump it.
    conn.execute("insert into instance_types VALUES "
                 "(NULL, NULL, NULL, 't1.test', 6, 4096, 2, 0, NULL, '87'"
                 ", 1.0, 40, 0, 0, 1, 0)")
    rows = _flavor_rows(conn)
    self.assertEqual(6, len(rows), "Rows %s" % rows)
    # reset by invoking the fixture again
    #
    # NOTE(sdague): it's important to reestablish the db
    # connection because otherwise we have a reference to the old
    # in mem db.
    self.useFixture(fixtures.Database())
    conn = engine.connect()
    rows = _flavor_rows(conn)
    self.assertEqual(5, len(rows), "Rows %s" % rows)
def _check_request_spec_migration(self):
    """Checks to make sure request spec migrations are complete.

    Iterates all cells checking to see that non-deleted instances have
    a matching request spec in the API database. This is necessary in
    order to drop the migrate_instances_add_request_spec online data
    migration and accompanying compatibility code found through
    nova-api and nova-conductor.

    :returns: UpgradeCheckResult - WARNING when cell mappings cannot
        be found, FAILURE when any cell has unmigrated instances,
        SUCCESS otherwise.
    """
    meta = MetaData(bind=db_session.get_api_engine())
    cell_mappings = Table('cell_mappings', meta, autoload=True)
    mappings = cell_mappings.select().execute().fetchall()
    if not mappings:
        # There are no cell mappings so we can't determine this, just
        # return a warning. The cellsv2 check would have already failed
        # on this.
        msg = (_('Unable to determine request spec migrations without '
                 'cell mappings.'))
        return UpgradeCheckResult(UpgradeCheckCode.WARNING, msg)

    request_specs = Table('request_specs', meta, autoload=True)
    ctxt = nova_context.get_admin_context()
    incomplete_cells = []  # list of cell mapping uuids
    for mapping in mappings:
        with nova_context.target_cell(ctxt, mapping) as cctxt:
            # Get all instance uuids for non-deleted instances in this
            # cell.
            meta = MetaData(bind=db_session.get_engine(context=cctxt))
            instances = Table('instances', meta, autoload=True)
            instance_records = (
                select([instances.c.uuid]).select_from(instances).where(
                    instances.c.deleted == 0
                ).execute().fetchall())
            # For each instance in the list, verify that it has a matching
            # request spec in the API DB.
            for inst in instance_records:
                spec_id = (
                    select([request_specs.c.id]).select_from(
                        request_specs).where(
                        request_specs.c.instance_uuid == inst['uuid']
                    ).execute().scalar())
                if spec_id is None:
                    # This cell does not have all of its instances
                    # migrated for request specs so track it and move on.
                    incomplete_cells.append(mapping['uuid'])
                    break

    # It's a failure if there are any unmigrated instances at this point
    # because we are planning to drop the online data migration routine and
    # compatibility code in Stein.
    if incomplete_cells:
        msg = (_("The following cells have instances which do not have "
                 "matching request_specs in the API database: %s Run "
                 "'nova-manage db online_data_migrations' on each cell "
                 "to create the missing request specs.") %
               ', '.join(incomplete_cells))
        return UpgradeCheckResult(UpgradeCheckCode.FAILURE, msg)
    return UpgradeCheckResult(UpgradeCheckCode.SUCCESS)
def _check_request_spec_migration(self):
    """Checks to make sure request spec migrations are complete.

    Iterates all cells checking to see that non-deleted instances have
    a matching request spec in the API database. This is necessary in
    order to drop the migrate_instances_add_request_spec online data
    migration and accompanying compatibility code found through
    nova-api and nova-conductor.

    :returns: upgradecheck.Result - WARNING when cell mappings cannot
        be found, FAILURE when any cell has unmigrated instances,
        SUCCESS otherwise.
    """
    meta = MetaData(bind=db_session.get_api_engine())
    cell_mappings = Table('cell_mappings', meta, autoload=True)
    mappings = cell_mappings.select().execute().fetchall()
    if not mappings:
        # There are no cell mappings so we can't determine this, just
        # return a warning. The cellsv2 check would have already failed
        # on this.
        msg = (_('Unable to determine request spec migrations without '
                 'cell mappings.'))
        return upgradecheck.Result(upgradecheck.Code.WARNING, msg)

    request_specs = Table('request_specs', meta, autoload=True)
    ctxt = nova_context.get_admin_context()
    incomplete_cells = []  # list of cell mapping uuids
    for mapping in mappings:
        with nova_context.target_cell(ctxt, mapping) as cctxt:
            # Get all instance uuids for non-deleted instances in this
            # cell.
            meta = MetaData(bind=db_session.get_engine(context=cctxt))
            instances = Table('instances', meta, autoload=True)
            instance_records = (
                select([instances.c.uuid]).select_from(instances).where(
                    instances.c.deleted == 0
                ).execute().fetchall())
            # For each instance in the list, verify that it has a matching
            # request spec in the API DB.
            for inst in instance_records:
                spec_id = (
                    select([request_specs.c.id]).select_from(
                        request_specs).where(
                        request_specs.c.instance_uuid == inst['uuid']
                    ).execute().scalar())
                if spec_id is None:
                    # This cell does not have all of its instances
                    # migrated for request specs so track it and move on.
                    incomplete_cells.append(mapping['uuid'])
                    break

    # It's a failure if there are any unmigrated instances at this point
    # because we are planning to drop the online data migration routine and
    # compatibility code in Stein.
    if incomplete_cells:
        msg = (_("The following cells have instances which do not have "
                 "matching request_specs in the API database: %s Run "
                 "'nova-manage db online_data_migrations' on each cell "
                 "to create the missing request specs.") %
               ', '.join(incomplete_cells))
        return upgradecheck.Result(upgradecheck.Code.FAILURE, msg)
    return upgradecheck.Result(upgradecheck.Code.SUCCESS)
def setUp(self):
    """Pin the schema at 329 and load the 330 migration under test."""
    super(TestNewtonCheck, self).setUp()
    self.useFixture(nova_fixtures.DatabaseAtVersion(329))
    self.context = context.get_admin_context()
    # Migration module names start with digits, so load by dotted path.
    module_path = ('nova.db.sqlalchemy.migrate_repo.versions.'
                   '330_enforce_mitaka_online_migrations')
    self.migration = importlib.import_module(module_path)
    self.engine = db_api.get_engine()
def _get_min_service_version(self, context, binary):
    """Return the minimum version of live ``binary`` services.

    Only non-deleted services that are not forced down are counted.
    """
    meta = MetaData(bind=db_session.get_engine(context=context))
    services = Table('services', meta, autoload=True)
    live_filter = and_(services.c.binary == binary,
                       services.c.deleted == 0,
                       services.c.forced_down == false())
    return select([sqlfunc.min(services.c.version)]).select_from(
        services).where(live_filter).scalar()
def setUp(self):
    """Pin the schema at 398 and load the 400 migration under test."""
    super(TestServicesUUIDCheck, self).setUp()
    self.useFixture(nova_fixtures.Database(version=398))
    self.context = context.get_admin_context()
    # Migration module names start with digits, so load by dotted path.
    module_path = ('nova.db.sqlalchemy.migrate_repo.versions.'
                   '400_enforce_service_uuid')
    self.migration = importlib.import_module(module_path)
    self.engine = db_api.get_engine()
def _get_min_service_version(self, context, binary):
    """Return the lowest version among live ``binary`` services.

    Deleted and forced-down services are excluded from the minimum.
    """
    engine = db_session.get_engine(context=context)
    meta = MetaData(bind=engine)
    services = Table('services', meta, autoload=True)
    conditions = and_(services.c.binary == binary,
                      services.c.deleted == 0,
                      services.c.forced_down == false())
    query = select([sqlfunc.min(services.c.version)]).select_from(
        services).where(conditions)
    return query.scalar()
def _cache_schema(self):
    """Run migrations once and cache the resulting schema as SQL.

    On first call this syncs the database to the latest migration and
    dumps the whole (SQLite) database into the module-level DB_SCHEMA
    string; subsequent calls are no-ops, letting tests restore the
    schema cheaply instead of re-running every migration.
    """
    global DB_SCHEMA
    if not DB_SCHEMA:
        engine = session.get_engine()
        conn = engine.connect()
        migration.db_sync()
        # iterdump() yields the database contents as SQL statements;
        # join them into one script executescript() can replay later.
        DB_SCHEMA = "".join(line for line in conn.connection.iterdump())
        engine.dispose()
def test_fixture_schema_version(self):
    """DatabaseAtVersion pins the schema at the requested migration."""
    self.useFixture(conf_fixture.ConfFixture())

    def _aggregates_table():
        # Reflect the aggregates table from a fresh engine/connection.
        eng = session.get_engine()
        eng.connect()
        meta = sqlalchemy.MetaData(eng)
        return eng, sqlalchemy.Table('aggregates', meta, autoload=True)

    # In/after 317 aggregates did have uuid
    self.useFixture(fixtures.DatabaseAtVersion(318))
    engine, aggregate = _aggregates_table()
    self.assertTrue(hasattr(aggregate.c, 'uuid'))

    # Before 317, aggregates had no uuid
    self.useFixture(fixtures.DatabaseAtVersion(316))
    engine, aggregate = _aggregates_table()
    self.assertFalse(hasattr(aggregate.c, 'uuid'))
    engine.dispose()
def setUp(self):
    """Pin the schema and load the 330 Mitaka-enforcement migration."""
    super(TestNewtonCheck, self).setUp()
    # TODO(jgauld): Newton test depends on version 329.
    # Temporarily set to 335 to bypass this check.
    # Proper fix TBD.
    self.useFixture(nova_fixtures.DatabaseAtVersion(335))
    self.context = context.get_admin_context()
    module_path = ('nova.db.sqlalchemy.migrate_repo.versions.'
                   '330_enforce_mitaka_online_migrations')
    self.migration = importlib.import_module(module_path)
    self.engine = db_api.get_engine()
def _get_table_counts(self):
    """Map each purgeable shadow table name to its current row count."""
    engine = sqlalchemy_api.get_engine()
    conn = engine.connect()
    meta = MetaData(engine)
    # Reflect the whole schema so _purgeable_tables can inspect it.
    meta.reflect()
    shadow_tables = sqlalchemy_api._purgeable_tables(meta)
    return {
        table.name: conn.execute(
            select([func.count()]).select_from(table)).fetchone()[0]
        for table in shadow_tables
    }
def _get_table_counts(self):
    """Return a dict mapping each purgeable shadow table name to the
    number of rows it currently holds.
    """
    engine = sqlalchemy_api.get_engine()
    conn = engine.connect()
    meta = MetaData(engine)
    # Reflect the whole schema so _purgeable_tables can inspect it.
    meta.reflect()
    shadow_tables = sqlalchemy_api._purgeable_tables(meta)
    results = {}
    for table in shadow_tables:
        # SELECT COUNT(*) per table; first column of the single row.
        r = conn.execute(
            select([func.count()]).select_from(table)).fetchone()
        results[table.name] = r[0]
    return results
def test_fixture_cleanup(self):
    """The Database fixture's cleanup leaves an empty in-memory DB."""
    # because this sets up reasonable db connection strings
    self.useFixture(conf_fixture.ConfFixture())
    fix = fixtures.Database()
    self.useFixture(fix)

    # manually do the cleanup that addCleanup will do
    fix.cleanup()

    # ensure the db contains nothing
    engine = session.get_engine()
    conn = engine.connect()
    schema = "".join(line for line in conn.connection.iterdump())
    # assertEqual takes (expected, observed); expected goes first so
    # failure messages read correctly.
    self.assertEqual("BEGIN TRANSACTION;COMMIT;", schema)
def setUp(self):
    """Downgrade the schema and seed rows with NULL (instance_)uuid."""
    super(TestNullInstanceUuidScanDB, self).setUp()
    self.engine = db_api.get_engine()
    # When this test runs, we've already run the schema migration to make
    # instances.uuid non-nullable, so we have to alter the table here
    # so we can test against a real database.
    self.downgrade(self.engine)
    # Now create fake entries in the fixed_ips, consoles and
    # instances table where (instance_)uuid is None for testing.
    fake_record = {'id': 1}
    for table_name in ('fixed_ips', 'instances', 'consoles'):
        db_utils.get_table(self.engine, table_name).insert().execute(
            fake_record)
def enforce_fk_constraints(self, engine=None):
    """Turn on foreign key enforcement for SQLite engines.

    SQLite leaves foreign key constraints off unless the foreign_keys
    pragma is enabled; other dialects are left untouched. Skips the
    running test when the SQLite library is too old for reliable FK
    support in SQLAlchemy.

    :param engine: engine to configure; defaults to the main DB engine.
    """
    if engine is None:
        engine = sqlalchemy_api.get_engine()
    dialect = engine.url.get_dialect()
    if dialect == sqlite.dialect:
        # We're seeing issues with foreign key support in SQLite 3.6.20
        # SQLAlchemy doesn't support it at all with < SQLite 3.6.19
        # It works fine in SQLite 3.7.
        # So return early to skip this test if running SQLite < 3.7
        import sqlite3
        tup = sqlite3.sqlite_version_info
        if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7):
            self.skipTest(
                'sqlite version too old for reliable SQLA foreign_keys')
        engine.connect().execute("PRAGMA foreign_keys = ON")
def setUp(self): super(TestDatabaseArchive, self).setUp() # TODO(mriedem): pull this out so we can re-use it in # test_archive_deleted_rows_fk_constraint # SQLite doesn't enforce foreign key constraints without a pragma. engine = sqlalchemy_api.get_engine() dialect = engine.url.get_dialect() if dialect == sqlite.dialect: # We're seeing issues with foreign key support in SQLite 3.6.20 # SQLAlchemy doesn't support it at all with < SQLite 3.6.19 # It works fine in SQLite 3.7. # So return early to skip this test if running SQLite < 3.7 import sqlite3 tup = sqlite3.sqlite_version_info if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7): self.skipTest( 'sqlite version too old for reliable SQLA foreign_keys') engine.connect().execute("PRAGMA foreign_keys = ON")
def _count_compute_nodes(self, context=None):
    """Returns the number of compute nodes in the cell database."""
    # NOTE(mriedem): This does not filter based on the service status
    # because a disabled nova-compute service could still be reporting
    # inventory info to the placement service. There could be an outside
    # chance that there are compute node records in the database for
    # disabled nova-compute services that aren't yet upgraded to Ocata or
    # the nova-compute service was deleted and the service isn't actually
    # running on the compute host but the operator hasn't cleaned up the
    # compute_nodes entry in the database yet. We consider those edge cases
    # here and the worst case scenario is we give a warning that there are
    # more compute nodes than resource providers. We can tighten this up
    # later if needed, for example by not including compute nodes that
    # don't have a corresponding nova-compute service in the services
    # table, or by only counting compute nodes with a service version of at
    # least 15 which was the highest service version when Newton was
    # released.
    engine = db_session.get_engine(context=context)
    meta = MetaData(bind=engine)
    compute_nodes = Table('compute_nodes', meta, autoload=True)
    count_query = select([sqlfunc.count()]).select_from(compute_nodes)
    return count_query.scalar()
def setup_pci_device(self, dev_type):
    """Insert a pci_devices row with the given ``dev_type`` via a raw
    core INSERT so only columns present in the old schema are touched.
    """
    # NOTE(jaypipes): We cannot use db_api.pci_device_update() here because
    # newer models of PciDevice contain fields (uuid) that are not present
    # in the older Newton DB schema and pci_device_update() uses the
    # SQLAlchemy ORM model_query().update() form which will produce an
    # UPDATE SQL statement that contains those new fields, resulting in an
    # OperationalError about table pci_devices has no such column uuid.
    engine = db_api.get_engine()
    tbl = models.PciDevice.__table__
    with engine.connect() as conn:
        # Placeholder values; only dev_type varies per test.
        ins_stmt = tbl.insert().values(
            address='foo:bar',
            compute_node_id=1,
            parent_addr=None,
            vendor_id='123',
            product_id='456',
            dev_type=dev_type,
            label='foobar',
            status='whatisthis?',
        )
        conn.execute(ins_stmt)
def setUp(self):
    """Load the 345 migration and fixture rows for each checked table."""
    super(TestOcataCheck, self).setUp()
    self.context = context.get_admin_context()
    self.migration = importlib.import_module(
        'nova.db.sqlalchemy.migrate_repo.versions.'
        '345_require_online_migration_completion')
    self.engine = db_api.get_engine()
    self.flavor_values = {
        'name': 'foo',
        'memory_mb': 256,
        'vcpus': 1,
        'root_gb': 10,
        'ephemeral_gb': 100,
        'flavorid': 'bar',
        'swap': 1,
        'rxtx_factor': 1.0,
        'vcpu_weight': 1,
        'disabled': False,
        'is_public': True,
        'deleted': 0
    }
    self.keypair_values = {
        'name': 'foo',
        # Fixed typo: this key was 'user_ud', which is not a key_pairs
        # column; the column is user_id (matching ig_values below).
        'user_id': 'bar',
        'fingerprint': 'baz',
        'public_key': 'bat',
        'type': 'ssh',
    }
    self.aggregate_values = {
        'uuid': uuidsentinel.agg,
        'name': 'foo',
    }
    self.ig_values = {
        'user_id': 'foo',
        'project_id': 'bar',
        'uuid': uuidsentinel.ig,
        'name': 'baz',
        'deleted': 0
    }
def _check_console_auths(self):
    """Checks for console usage and warns with info for rolling upgrade.

    Iterates all cells checking to see if the nova-consoleauth service
    is non-deleted/non-disabled and whether there are any console token
    auths in that cell database. If there is a nova-consoleauth service
    being used and no console token auths in the cell database, emit a
    warning telling the user to set [workarounds]enable_consoleauth =
    True if they are performing a rolling upgrade.

    :returns: upgradecheck.Result - WARNING when cell mappings are
        missing or any cell still runs pre-Rocky computes while
        consoles are in use, SUCCESS otherwise.
    """
    # If we're using cells v1, we don't need to check if the workaround
    # needs to be used because cells v1 always uses nova-consoleauth.
    # If the operator has already enabled the workaround, we don't need
    # to check anything.
    if CONF.cells.enable or CONF.workarounds.enable_consoleauth:
        return upgradecheck.Result(upgradecheck.Code.SUCCESS)

    # We need to check cell0 for nova-consoleauth service records because
    # it's possible a deployment could have services stored in the cell0
    # database, if they've defaulted their [database]connection in
    # nova.conf to cell0.
    meta = MetaData(bind=db_session.get_api_engine())
    cell_mappings = Table('cell_mappings', meta, autoload=True)
    mappings = cell_mappings.select().execute().fetchall()

    if not mappings:
        # There are no cell mappings so we can't determine this, just
        # return a warning. The cellsv2 check would have already failed
        # on this.
        msg = (_('Unable to check consoles without cell mappings.'))
        return upgradecheck.Result(upgradecheck.Code.WARNING, msg)

    ctxt = nova_context.get_admin_context()
    # If we find a non-deleted, non-disabled nova-consoleauth service in
    # any cell, we will assume the deployment is using consoles.
    using_consoles = False
    for mapping in mappings:
        with nova_context.target_cell(ctxt, mapping) as cctxt:
            # Check for any non-deleted, non-disabled nova-consoleauth
            # service.
            meta = MetaData(bind=db_session.get_engine(context=cctxt))
            services = Table('services', meta, autoload=True)
            consoleauth_service_record = (select([
                services.c.id
            ]).select_from(services).where(
                and_(services.c.binary == 'nova-consoleauth',
                     services.c.deleted == 0,
                     services.c.disabled == false())).execute().first())
            if consoleauth_service_record:
                using_consoles = True
                break

    if using_consoles:
        # If the deployment is using consoles, we can only be certain the
        # upgrade is complete if each compute service is >= Rocky and
        # supports storing console token auths in the database backend.
        for mapping in mappings:
            # Skip cell0 as no compute services should be in it.
            if mapping['uuid'] == cell_mapping_obj.CellMapping.CELL0_UUID:
                continue
            # Get the minimum nova-compute service version in this
            # cell.
            with nova_context.target_cell(ctxt, mapping) as cctxt:
                min_version = self._get_min_service_version(
                    cctxt, 'nova-compute')
                # We could get None for the minimum version in the case of
                # new install where there are no computes. If there are
                # compute services, they should all have versions.
                if min_version is not None and min_version < 35:
                    msg = _("One or more cells were found which have "
                            "nova-compute services older than Rocky. "
                            "Please set the "
                            "'[workarounds]enable_consoleauth' "
                            "configuration option to 'True' on your "
                            "console proxy host if you are performing a "
                            "rolling upgrade to enable consoles to "
                            "function during a partial upgrade.")
                    return upgradecheck.Result(upgradecheck.Code.WARNING,
                                               msg)

    return upgradecheck.Result(upgradecheck.Code.SUCCESS)
def enforce_fk_constraints(self, engine=None):
    """Enable SQLite foreign key enforcement on ``engine``.

    SQLite leaves FK constraints off by default, so turn on the
    foreign_keys pragma there; other dialects are left untouched.

    :param engine: engine to configure; defaults to the main DB engine.
    """
    if engine is None:
        engine = sqlalchemy_api.get_engine()
    if engine.url.get_dialect() == sqlite.dialect:
        connection = engine.connect()
        connection.execute("PRAGMA foreign_keys = ON")
def get_engine(database='main', context=None):
    """Return the engine for the 'main' or 'api' database.

    :param database: 'main' or 'api'; any other value falls through
        and returns None.
    :param context: request context, used only to route the 'main'
        database lookup.
    """
    if database == 'api':
        return db_session.get_api_engine()
    if database == 'main':
        return db_session.get_engine(context=context)
def get_engine(database='main'):
    """Return the engine for the 'main' or 'api' database; any other
    name falls through and returns None.
    """
    engines = {
        'main': db_session.get_engine,
        'api': db_session.get_api_engine,
    }
    getter = engines.get(database)
    if getter is not None:
        return getter()
def _check_console_auths(self):
    """Checks for console usage and warns with info for rolling upgrade.

    Iterates all cells checking to see if the nova-consoleauth service
    is non-deleted/non-disabled and whether there are any console token
    auths in that cell database. If there is a nova-consoleauth service
    being used and no console token auths in the cell database, emit a
    warning telling the user to set [workarounds]enable_consoleauth =
    True if they are performing a rolling upgrade.

    :returns: upgradecheck.Result - WARNING when cell mappings are
        missing or any cell still runs pre-Rocky computes while
        consoles are in use, SUCCESS otherwise.
    """
    # If we're using cells v1, we don't need to check if the workaround
    # needs to be used because cells v1 always uses nova-consoleauth.
    # If the operator has already enabled the workaround, we don't need
    # to check anything.
    if CONF.cells.enable or CONF.workarounds.enable_consoleauth:
        return upgradecheck.Result(upgradecheck.Code.SUCCESS)

    # We need to check cell0 for nova-consoleauth service records because
    # it's possible a deployment could have services stored in the cell0
    # database, if they've defaulted their [database]connection in
    # nova.conf to cell0.
    meta = MetaData(bind=db_session.get_api_engine())
    cell_mappings = Table('cell_mappings', meta, autoload=True)
    mappings = cell_mappings.select().execute().fetchall()

    if not mappings:
        # There are no cell mappings so we can't determine this, just
        # return a warning. The cellsv2 check would have already failed
        # on this.
        msg = (_('Unable to check consoles without cell mappings.'))
        return upgradecheck.Result(upgradecheck.Code.WARNING, msg)

    ctxt = nova_context.get_admin_context()
    # If we find a non-deleted, non-disabled nova-consoleauth service in
    # any cell, we will assume the deployment is using consoles.
    using_consoles = False
    for mapping in mappings:
        with nova_context.target_cell(ctxt, mapping) as cctxt:
            # Check for any non-deleted, non-disabled nova-consoleauth
            # service.
            meta = MetaData(bind=db_session.get_engine(context=cctxt))
            services = Table('services', meta, autoload=True)
            consoleauth_service_record = (
                select([services.c.id]).select_from(services).where(and_(
                    services.c.binary == 'nova-consoleauth',
                    services.c.deleted == 0,
                    services.c.disabled == false())).execute().first())
            if consoleauth_service_record:
                using_consoles = True
                break

    if using_consoles:
        # If the deployment is using consoles, we can only be certain the
        # upgrade is complete if each compute service is >= Rocky and
        # supports storing console token auths in the database backend.
        for mapping in mappings:
            # Skip cell0 as no compute services should be in it.
            if mapping['uuid'] == cell_mapping_obj.CellMapping.CELL0_UUID:
                continue
            # Get the minimum nova-compute service version in this
            # cell.
            with nova_context.target_cell(ctxt, mapping) as cctxt:
                min_version = self._get_min_service_version(
                    cctxt, 'nova-compute')
                # We could get None for the minimum version in the case of
                # new install where there are no computes. If there are
                # compute services, they should all have versions.
                if min_version is not None and min_version < 35:
                    msg = _("One or more cells were found which have "
                            "nova-compute services older than Rocky. "
                            "Please set the "
                            "'[workarounds]enable_consoleauth' "
                            "configuration option to 'True' on your "
                            "console proxy host if you are performing a "
                            "rolling upgrade to enable consoles to "
                            "function during a partial upgrade.")
                    return upgradecheck.Result(upgradecheck.Code.WARNING,
                                               msg)

    return upgradecheck.Result(upgradecheck.Code.SUCCESS)
def reset(self):
    """Restore the test database to the cached pristine schema.

    Ensures the schema dump exists, disposes the engine's pooled
    connections, then replays the cached SQL script into a fresh
    connection.
    """
    self._cache_schema()
    engine = session.get_engine()
    # Dispose so the next connect() yields a new connection rather
    # than a pooled one still referencing the old database state.
    engine.dispose()
    conn = engine.connect()
    conn.connection.executescript(DB_SCHEMA)
def _count_compute_nodes(self):
    """Returns the number of compute nodes in the cell database."""
    engine = db_session.get_engine()
    meta = MetaData(bind=engine)
    compute_nodes = Table('compute_nodes', meta, autoload=True)
    count_query = select([sqlfunc.count()]).select_from(compute_nodes)
    return count_query.scalar()
def _check_ironic_flavor_migration(self):
    """In Pike, ironic instances and flavors need to be migrated to use
    custom resource classes. In ironic, the node.resource_class should be
    set to some custom resource class value which should match a
    "resources:<custom resource class name>" flavor extra spec on
    baremetal flavors. Existing ironic instances will have their embedded
    instance.flavor.extra_specs migrated to use the matching ironic
    node.resource_class value in the nova-compute service, or they can be
    forcefully migrated using "nova-manage db ironic_flavor_migration".

    In this check, we look for all ironic compute nodes in all non-cell0
    cells, and from those ironic compute nodes, we look for an instance
    that has a "resources:CUSTOM_*" key in it's embedded flavor extra
    specs.

    :returns: UpgradeCheckResult - WARNING when no non-cell0 mappings
        exist, FAILURE when unmigrated ironic instances remain, SUCCESS
        otherwise.
    """
    cell_mappings = self._get_non_cell0_mappings()
    ctxt = nova_context.get_admin_context()
    # dict of cell identifier (name or uuid) to number of unmigrated
    # instances
    unmigrated_instance_count_by_cell = collections.defaultdict(int)
    for cell_mapping in cell_mappings:
        with nova_context.target_cell(ctxt, cell_mapping) as cctxt:
            # Get the (non-deleted) ironic compute nodes in this cell.
            meta = MetaData(bind=db_session.get_engine(context=cctxt))
            compute_nodes = Table('compute_nodes', meta, autoload=True)
            ironic_nodes = (
                compute_nodes.select().where(and_(
                    compute_nodes.c.hypervisor_type == 'ironic',
                    compute_nodes.c.deleted == 0
                )).execute().fetchall())

            if ironic_nodes:
                # We have ironic nodes in this cell, let's iterate over
                # them looking for instances.
                instances = Table('instances', meta, autoload=True)
                extras = Table('instance_extra', meta, autoload=True)
                for node in ironic_nodes:
                    nodename = node['hypervisor_hostname']
                    # Get any (non-deleted) instances for this node.
                    ironic_instances = (
                        instances.select().where(and_(
                            instances.c.node == nodename,
                            instances.c.deleted == 0
                        )).execute().fetchall())
                    # Get the instance_extras for each instance so we can
                    # find the flavors.
                    for inst in ironic_instances:
                        if not self._is_ironic_instance_migrated(
                                extras, inst):
                            # We didn't find the extra spec key for this
                            # instance so increment the number of
                            # unmigrated instances in this cell.
                            unmigrated_instance_count_by_cell[
                                cell_mapping['uuid']] += 1

    if not cell_mappings:
        # There are no non-cell0 mappings so we can't determine this, just
        # return a warning. The cellsv2 check would have already failed
        # on this.
        msg = (_('Unable to determine ironic flavor migration without '
                 'cell mappings.'))
        return UpgradeCheckResult(UpgradeCheckCode.WARNING, msg)

    if unmigrated_instance_count_by_cell:
        # There are unmigrated ironic instances, so we need to fail.
        msg = (_('There are (cell=x) number of unmigrated instances in '
                 'each cell: %s. Run \'nova-manage db '
                 'ironic_flavor_migration\' on each cell.') %
               ' '.join('(%s=%s)' % (
                   cell_id, unmigrated_instance_count_by_cell[cell_id])
                   for cell_id in
                   sorted(unmigrated_instance_count_by_cell.keys())))
        return UpgradeCheckResult(UpgradeCheckCode.FAILURE, msg)

    # Either there were no ironic compute nodes or all instances for
    # those nodes are already migrated, so there is nothing to do.
    return UpgradeCheckResult(UpgradeCheckCode.SUCCESS)
def _check_ironic_flavor_migration(self):
    """In Pike, ironic instances and flavors need to be migrated to use
    custom resource classes. In ironic, the node.resource_class should be
    set to some custom resource class value which should match a
    "resources:<custom resource class name>" flavor extra spec on
    baremetal flavors. Existing ironic instances will have their embedded
    instance.flavor.extra_specs migrated to use the matching ironic
    node.resource_class value in the nova-compute service, or they can be
    forcefully migrated using "nova-manage db ironic_flavor_migration".

    In this check, we look for all ironic compute nodes in all non-cell0
    cells, and from those ironic compute nodes, we look for an instance
    that has a "resources:CUSTOM_*" key in it's embedded flavor extra
    specs.

    :returns: upgradecheck.Result - WARNING when no non-cell0 mappings
        exist, FAILURE when unmigrated ironic instances remain, SUCCESS
        otherwise.
    """
    cell_mappings = self._get_non_cell0_mappings()
    ctxt = nova_context.get_admin_context()
    # dict of cell identifier (name or uuid) to number of unmigrated
    # instances
    unmigrated_instance_count_by_cell = collections.defaultdict(int)
    for cell_mapping in cell_mappings:
        with nova_context.target_cell(ctxt, cell_mapping) as cctxt:
            # Get the (non-deleted) ironic compute nodes in this cell.
            meta = MetaData(bind=db_session.get_engine(context=cctxt))
            compute_nodes = Table('compute_nodes', meta, autoload=True)
            ironic_nodes = (compute_nodes.select().where(
                and_(compute_nodes.c.hypervisor_type == 'ironic',
                     compute_nodes.c.deleted == 0)).execute().fetchall())

            if ironic_nodes:
                # We have ironic nodes in this cell, let's iterate over
                # them looking for instances.
                instances = Table('instances', meta, autoload=True)
                extras = Table('instance_extra', meta, autoload=True)
                for node in ironic_nodes:
                    nodename = node['hypervisor_hostname']
                    # Get any (non-deleted) instances for this node.
                    ironic_instances = (instances.select().where(
                        and_(instances.c.node == nodename,
                             instances.c.deleted == 0)).execute().fetchall())
                    # Get the instance_extras for each instance so we can
                    # find the flavors.
                    for inst in ironic_instances:
                        if not self._is_ironic_instance_migrated(
                                extras, inst):
                            # We didn't find the extra spec key for this
                            # instance so increment the number of
                            # unmigrated instances in this cell.
                            unmigrated_instance_count_by_cell[
                                cell_mapping.uuid] += 1

    if not cell_mappings:
        # There are no non-cell0 mappings so we can't determine this, just
        # return a warning. The cellsv2 check would have already failed
        # on this.
        msg = (_('Unable to determine ironic flavor migration without '
                 'cell mappings.'))
        return upgradecheck.Result(upgradecheck.Code.WARNING, msg)

    if unmigrated_instance_count_by_cell:
        # There are unmigrated ironic instances, so we need to fail.
        msg = (
            _('There are (cell=x) number of unmigrated instances in '
              'each cell: %s. Run \'nova-manage db '
              'ironic_flavor_migration\' on each cell.') %
            ' '.join('(%s=%s)' %
                     (cell_id,
                      unmigrated_instance_count_by_cell[cell_id])
                     for cell_id in sorted(
                         unmigrated_instance_count_by_cell.keys())))
        return upgradecheck.Result(upgradecheck.Code.FAILURE, msg)

    # Either there were no ironic compute nodes or all instances for
    # those nodes are already migrated, so there is nothing to do.
    return upgradecheck.Result(upgradecheck.Code.SUCCESS)