def get_engine(database='main', context=None):
    """Return the SQLAlchemy engine for the named database.

    :param database: one of 'main', 'api' or 'placement'
    :param context: request context, forwarded only for the 'main' database
    :returns: the engine, or None for an unrecognized database name
    """
    if database == 'api':
        return db_session.get_api_engine()
    elif database == 'placement':
        return placement_db.get_placement_engine()
    elif database == 'main':
        return db_session.get_engine(context=context)
def get_engine(database='main'):
    """Look up the engine for the requested database.

    :param database: 'main', 'api' or 'placement'
    :returns: the engine, or None if the name is not recognized
    """
    engines = {
        'main': db_session.get_engine,
        'api': db_session.get_api_engine,
        'placement': db_session.get_placement_engine,
    }
    getter = engines.get(database)
    if getter is not None:
        return getter()
def _get_non_cell0_mappings():
    """Queries the API database for non-cell0 cell mappings."""
    api_engine = db_session.get_api_engine()
    metadata = MetaData(bind=api_engine)
    mappings_table = Table('cell_mappings', metadata, autoload=True)
    # Everything except the special cell0 mapping.
    not_cell0 = (
        mappings_table.c.uuid != cell_mapping_obj.CellMapping.CELL0_UUID)
    query = mappings_table.select().where(not_cell0)
    return query.execute().fetchall()
def test_api_fixture_reset(self):
    """Re-applying the api Database fixture empties the tables again."""
    # This sets up reasonable db connection strings
    self.useFixture(conf_fixture.ConfFixture())
    self.useFixture(fixtures.Database(database='api'))

    def _cell_rows(connection):
        # Fetch every row currently in cell_mappings.
        return connection.execute("select * from cell_mappings").fetchall()

    engine = session.get_api_engine()
    conn = engine.connect()
    rows = _cell_rows(conn)
    self.assertEqual(0, len(rows), "Rows %s" % rows)

    uuid = uuidutils.generate_uuid()
    conn.execute("insert into cell_mappings (uuid, name) VALUES "
                 "('%s', 'fake-cell')" % (uuid,))
    rows = _cell_rows(conn)
    self.assertEqual(1, len(rows), "Rows %s" % rows)

    # reset by invoking the fixture again
    #
    # NOTE(sdague): it's important to reestablish the db
    # connection because otherwise we have a reference to the old
    # in mem db.
    self.useFixture(fixtures.Database(database='api'))
    conn = engine.connect()
    rows = _cell_rows(conn)
    self.assertEqual(0, len(rows), "Rows %s" % rows)
def _get_non_cell0_mappings():
    """Queries the API database for non-cell0 cell mappings."""
    meta = MetaData(bind=db_session.get_api_engine())
    table = Table('cell_mappings', meta, autoload=True)
    # cell0 is identified by its well-known UUID; filter it out.
    non_cell0 = table.c.uuid != cell_mapping_obj.CellMapping.CELL0_UUID
    return table.select().where(non_cell0).execute().fetchall()
def test_api_fixture_reset(self):
    """A second Database fixture application wipes earlier inserts."""
    # This sets up reasonable db connection strings
    self.useFixture(conf_fixture.ConfFixture())
    self.useFixture(fixtures.Database(database='api'))
    engine = session.get_api_engine()
    conn = engine.connect()

    select_all = "select * from cell_mappings"

    # The fixture hands us an empty table to start with.
    rows = conn.execute(select_all).fetchall()
    self.assertEqual(0, len(rows), "Rows %s" % rows)

    # Insert one mapping so there is something for the reset to remove.
    uuid = uuidutils.generate_uuid()
    conn.execute("insert into cell_mappings (uuid, name) VALUES "
                 "('%s', 'fake-cell')" % (uuid, ))
    rows = conn.execute(select_all).fetchall()
    self.assertEqual(1, len(rows), "Rows %s" % rows)

    # reset by invoking the fixture again
    #
    # NOTE(sdague): it's important to reestablish the db
    # connection because otherwise we have a reference to the old
    # in mem db.
    self.useFixture(fixtures.Database(database='api'))
    conn = engine.connect()
    rows = conn.execute(select_all).fetchall()
    self.assertEqual(0, len(rows), "Rows %s" % rows)
def _check_request_spec_migration(self):
    """Checks to make sure request spec migrations are complete.

    Iterates all cells checking to see that non-deleted instances have
    a matching request spec in the API database. This is necessary in
    order to drop the migrate_instances_add_request_spec online data
    migration and accompanying compatibility code found through
    nova-api and nova-conductor.

    :returns: an UpgradeCheckResult; WARNING when no cell mappings
        exist, FAILURE when any cell has unmigrated instances,
        SUCCESS otherwise.
    """
    meta = MetaData(bind=db_session.get_api_engine())
    cell_mappings = Table('cell_mappings', meta, autoload=True)
    mappings = cell_mappings.select().execute().fetchall()
    if not mappings:
        # There are no cell mappings so we can't determine this, just
        # return a warning. The cellsv2 check would have already failed
        # on this.
        msg = (_('Unable to determine request spec migrations without '
                 'cell mappings.'))
        return UpgradeCheckResult(UpgradeCheckCode.WARNING, msg)

    request_specs = Table('request_specs', meta, autoload=True)
    ctxt = nova_context.get_admin_context()
    incomplete_cells = []  # list of cell mapping uuids
    for mapping in mappings:
        with nova_context.target_cell(ctxt, mapping) as cctxt:
            # Get all instance uuids for non-deleted instances in this
            # cell.
            meta = MetaData(bind=db_session.get_engine(context=cctxt))
            instances = Table('instances', meta, autoload=True)
            instance_records = (
                select([instances.c.uuid]).select_from(instances).where(
                    instances.c.deleted == 0
                ).execute().fetchall())
            # For each instance in the list, verify that it has a matching
            # request spec in the API DB.
            for inst in instance_records:
                spec_id = (
                    select([request_specs.c.id]).select_from(
                        request_specs).where(
                        request_specs.c.instance_uuid == inst['uuid']
                    ).execute().scalar())
                if spec_id is None:
                    # This cell does not have all of its instances
                    # migrated for request specs so track it and move on.
                    incomplete_cells.append(mapping['uuid'])
                    break

    # It's a failure if there are any unmigrated instances at this point
    # because we are planning to drop the online data migration routine and
    # compatibility code in Stein.
    if incomplete_cells:
        msg = (_("The following cells have instances which do not have "
                 "matching request_specs in the API database: %s Run "
                 "'nova-manage db online_data_migrations' on each cell "
                 "to create the missing request specs.") %
               ', '.join(incomplete_cells))
        return UpgradeCheckResult(UpgradeCheckCode.FAILURE, msg)
    return UpgradeCheckResult(UpgradeCheckCode.SUCCESS)
def _check_request_spec_migration(self):
    """Checks to make sure request spec migrations are complete.

    Iterates all cells checking to see that non-deleted instances have
    a matching request spec in the API database. This is necessary in
    order to drop the migrate_instances_add_request_spec online data
    migration and accompanying compatibility code found through
    nova-api and nova-conductor.

    :returns: an upgradecheck.Result; WARNING when no cell mappings
        exist, FAILURE when any cell has unmigrated instances,
        SUCCESS otherwise.
    """
    meta = MetaData(bind=db_session.get_api_engine())
    cell_mappings = Table('cell_mappings', meta, autoload=True)
    mappings = cell_mappings.select().execute().fetchall()
    if not mappings:
        # There are no cell mappings so we can't determine this, just
        # return a warning. The cellsv2 check would have already failed
        # on this.
        msg = (_('Unable to determine request spec migrations without '
                 'cell mappings.'))
        return upgradecheck.Result(upgradecheck.Code.WARNING, msg)

    request_specs = Table('request_specs', meta, autoload=True)
    ctxt = nova_context.get_admin_context()
    incomplete_cells = []  # list of cell mapping uuids
    for mapping in mappings:
        with nova_context.target_cell(ctxt, mapping) as cctxt:
            # Get all instance uuids for non-deleted instances in this
            # cell.
            meta = MetaData(bind=db_session.get_engine(context=cctxt))
            instances = Table('instances', meta, autoload=True)
            instance_records = (
                select([instances.c.uuid]).select_from(instances).where(
                    instances.c.deleted == 0
                ).execute().fetchall())
            # For each instance in the list, verify that it has a matching
            # request spec in the API DB.
            for inst in instance_records:
                spec_id = (
                    select([request_specs.c.id]).select_from(
                        request_specs).where(
                        request_specs.c.instance_uuid == inst['uuid']
                    ).execute().scalar())
                if spec_id is None:
                    # This cell does not have all of its instances
                    # migrated for request specs so track it and move on.
                    incomplete_cells.append(mapping['uuid'])
                    break

    # It's a failure if there are any unmigrated instances at this point
    # because we are planning to drop the online data migration routine and
    # compatibility code in Stein.
    if incomplete_cells:
        msg = (_("The following cells have instances which do not have "
                 "matching request_specs in the API database: %s Run "
                 "'nova-manage db online_data_migrations' on each cell "
                 "to create the missing request specs.") %
               ', '.join(incomplete_cells))
        return upgradecheck.Result(upgradecheck.Code.FAILURE, msg)
    return upgradecheck.Result(upgradecheck.Code.SUCCESS)
def setUp(self):
    """Set up an API database pinned prior to the cells-check migration."""
    super(TestNewtonCellsCheck, self).setUp()
    # Pin the API schema at version 28, i.e. before the
    # 030_require_cell_setup migration imported below has been applied.
    self.useFixture(nova_fixtures.DatabaseAtVersion(28, 'api'))
    self.context = context.get_admin_context()
    migration_path = ('nova.db.sqlalchemy.api_migrations.migrate_repo.'
                      'versions.030_require_cell_setup')
    self.migration = importlib.import_module(migration_path)
    self.engine = db_api.get_api_engine()
def _check_api_service_version(self):
    """Checks nova-osapi_compute service versions across cells.

    For non-cellsv1 deployments, based on how the [database]/connection
    is configured for the nova-api service, the nova-osapi_compute
    service versions before 15 will only attempt to lookup instances
    from the local database configured for the nova-api service
    directly.

    This can cause issues if there are newer API service versions in
    cell1 after the upgrade to Ocata, but lingering older API service
    versions in an older database.

    This check will scan all cells looking for a minimum
    nova-osapi_compute service version less than 15 and if found, emit
    a warning that those service entries likely need to be cleaned up.

    :returns: an UpgradeCheckResult; always SUCCESS for cells v1,
        WARNING when cell mappings are missing or old API service
        records are found, SUCCESS otherwise.
    """
    # If we're using cells v1 then we don't care about this.
    if CONF.cells.enable:
        return UpgradeCheckResult(UpgradeCheckCode.SUCCESS)

    meta = MetaData(bind=db_session.get_api_engine())
    cell_mappings = Table('cell_mappings', meta, autoload=True)
    mappings = cell_mappings.select().execute().fetchall()
    if not mappings:
        # There are no cell mappings so we can't determine this, just
        # return a warning. The cellsv2 check would have already failed
        # on this.
        msg = (_('Unable to determine API service versions without '
                 'cell mappings.'))
        return UpgradeCheckResult(UpgradeCheckCode.WARNING, msg)

    ctxt = nova_context.get_admin_context()
    cells_with_old_api_services = []
    for mapping in mappings:
        with nova_context.target_cell(ctxt, mapping) as cctxt:
            # Get the minimum nova-osapi_compute service version in this
            # cell.
            min_version = self._get_min_service_version(
                cctxt, 'nova-osapi_compute')
            if min_version is not None and min_version < 15:
                cells_with_old_api_services.append(mapping['uuid'])

    # If there are any cells with older API versions, we report it as a
    # warning since we don't know how the actual nova-api service is
    # configured, but we need to give the operator some indication that
    # they have something to investigate/cleanup.
    if cells_with_old_api_services:
        msg = (_("The following cells have 'nova-osapi_compute' services "
                 "with version < 15 which may cause issues when querying "
                 "instances from the API: %s. Depending on how nova-api "
                 "is configured, this may not be a problem, but is worth "
                 "investigating and potentially cleaning up those older "
                 "records. See "
                 "https://bugs.launchpad.net/nova/+bug/1759316 for "
                 "details.") % ', '.join(cells_with_old_api_services))
        return UpgradeCheckResult(UpgradeCheckCode.WARNING, msg)
    return UpgradeCheckResult(UpgradeCheckCode.SUCCESS)
def _adjust_autoincrement(context, value):
    """Reset the flavors id sequence after migrating flavor rows.

    Only acts on PostgreSQL; other backends are left untouched.
    """
    engine = db_api.get_api_engine()
    if engine.name != 'postgresql':
        return
    # NOTE(danms): If we migrated some flavors in the above function,
    # then we will have confused postgres' sequence for the autoincrement
    # primary key. MySQL does not care about this, but since postgres does,
    # we need to reset this to avoid a failure on the next flavor creation.
    statement = 'ALTER SEQUENCE flavors_id_seq RESTART WITH %i;' % (value)
    engine.execute(text(statement))
def _adjust_autoincrement(context, value):
    """Restart the flavors primary-key sequence (PostgreSQL only)."""
    engine = db_api.get_api_engine()
    if engine.name == 'postgresql':
        # NOTE(danms): If we migrated some flavors in the above function,
        # then we will have confused postgres' sequence for the
        # autoincrement primary key. MySQL does not care about this, but
        # since postgres does, we need to reset this to avoid a failure
        # on the next flavor creation.
        restart_sql = text(
            'ALTER SEQUENCE flavors_id_seq RESTART WITH %i;' % (value))
        engine.execute(restart_sql)
def _check_cellsv2(self):
    """Checks to see if cells v2 has been setup.

    These are the same checks performed in the 030_require_cell_setup
    API DB migration except we expect this to be run AFTER the
    nova-manage cell_v2 simple_cell_setup command, which would create
    the cell and host mappings and sync the cell0 database schema, so
    we don't check for flavors at all because you could create those
    after doing this on an initial install. This also has to be
    careful about checking for compute nodes if there are no host
    mappings on a fresh install.

    :returns: an UpgradeCheckResult; FAILURE when mappings are missing
        or inconsistent, SUCCESS otherwise.
    """
    meta = MetaData()
    meta.bind = db_session.get_api_engine()

    cell_mappings = Table('cell_mappings', meta, autoload=True)
    count = select([sqlfunc.count()]).select_from(cell_mappings).scalar()
    # Two mappings are required at a minimum, cell0 and your first cell
    if count < 2:
        msg = _('There needs to be at least two cell mappings, one for '
                'cell0 and one for your first cell. Run command '
                '\'nova-manage cell_v2 simple_cell_setup\' and then '
                'retry.')
        return UpgradeCheckResult(UpgradeCheckCode.FAILURE, msg)

    count = select([sqlfunc.count()]).select_from(cell_mappings).where(
        cell_mappings.c.uuid ==
        cell_mapping_obj.CellMapping.CELL0_UUID).scalar()
    if count != 1:
        msg = _('No cell0 mapping found. Run command '
                '\'nova-manage cell_v2 simple_cell_setup\' and then '
                'retry.')
        return UpgradeCheckResult(UpgradeCheckCode.FAILURE, msg)

    host_mappings = Table('host_mappings', meta, autoload=True)
    count = select([sqlfunc.count()]).select_from(host_mappings).scalar()
    if count == 0:
        # This may be a fresh install in which case there may not be any
        # compute_nodes in the cell database if the nova-compute service
        # hasn't started yet to create those records. So let's query the
        # cell database for compute_nodes records and if we find at least
        # one it's a failure.
        num_computes = self._count_compute_nodes()
        if num_computes > 0:
            msg = _('No host mappings found but there are compute nodes. '
                    'Run command \'nova-manage cell_v2 '
                    'simple_cell_setup\' and then retry.')
            return UpgradeCheckResult(UpgradeCheckCode.FAILURE, msg)

        msg = _('No host mappings or compute nodes were found. Remember '
                'to run command \'nova-manage cell_v2 discover_hosts\' '
                'when new compute hosts are deployed.')
        return UpgradeCheckResult(UpgradeCheckCode.SUCCESS, msg)

    return UpgradeCheckResult(UpgradeCheckCode.SUCCESS)
def test_api_fixture_cleanup(self):
    """The api Database fixture cleanup leaves an empty schema behind."""
    # This sets up reasonable db connection strings
    self.useFixture(conf_fixture.ConfFixture())
    fix = fixtures.Database(database='api')
    self.useFixture(fix)

    # No data inserted by migrations so we need to add a row
    engine = session.get_api_engine()
    conn = engine.connect()
    fake_uuid = uuidutils.generate_uuid()
    conn.execute("insert into cell_mappings (uuid, name) VALUES "
                 "('%s', 'fake-cell')" % (fake_uuid, ))
    rows = conn.execute("select * from cell_mappings").fetchall()
    self.assertEqual(1, len(rows), "Rows %s" % rows)

    # Manually do the cleanup that addCleanup will do
    fix.cleanup()

    # Ensure the db contains nothing
    engine = session.get_api_engine()
    conn = engine.connect()
    schema = "".join(line for line in conn.connection.iterdump())
    self.assertEqual("BEGIN TRANSACTION;COMMIT;", schema)
def test_api_fixture_cleanup(self):
    """Verify fixture cleanup wipes all data from the in-memory db."""
    # This sets up reasonable db connection strings
    self.useFixture(conf_fixture.ConfFixture())
    fix = fixtures.Database(database='api')
    self.useFixture(fix)

    # No data inserted by migrations so we need to add a row
    engine = session.get_api_engine()
    connection = engine.connect()
    uuid = uuidutils.generate_uuid()
    connection.execute("insert into cell_mappings (uuid, name) VALUES "
                       "('%s', 'fake-cell')" % (uuid,))
    mappings = connection.execute("select * from cell_mappings").fetchall()
    self.assertEqual(1, len(mappings), "Rows %s" % mappings)

    # Manually do the cleanup that addCleanup will do
    fix.cleanup()

    # Ensure the db contains nothing
    engine = session.get_api_engine()
    connection = engine.connect()
    schema = "".join(connection.connection.iterdump())
    self.assertEqual("BEGIN TRANSACTION;COMMIT;", schema)
def test_flavors(self):
    """DefaultFlavorsFixture populates the empty flavors table."""
    self.useFixture(conf_fixture.ConfFixture())
    self.useFixture(fixtures.Database())
    self.useFixture(fixtures.Database(database='api'))
    engine = session.get_api_engine()
    conn = engine.connect()

    # Nothing there yet before the flavors fixture runs.
    rows = conn.execute("select * from flavors").fetchall()
    self.assertEqual(0, len(rows), "Rows %s" % rows)

    self.useFixture(fixtures.DefaultFlavorsFixture())

    rows = conn.execute("select * from flavors").fetchall()
    self.assertEqual(5, len(rows), "Rows %s" % rows)
def test_flavors(self, mock_send_notification):
    """The default flavors fixture seeds six flavor records."""
    self.useFixture(conf_fixture.ConfFixture())
    self.useFixture(fixtures.Database())
    self.useFixture(fixtures.Database(database='api'))
    engine = session.get_api_engine()
    conn = engine.connect()

    select_flavors = "select * from flavors"

    result = conn.execute(select_flavors)
    rows = result.fetchall()
    self.assertEqual(0, len(rows), "Rows %s" % rows)

    self.useFixture(fixtures.DefaultFlavorsFixture())

    result = conn.execute(select_flavors)
    rows = result.fetchall()
    self.assertEqual(6, len(rows), "Rows %s" % rows)
def _count_compute_resource_providers():
    """Returns the number of compute resource providers in the API database

    The resource provider count is filtered based on resource providers
    which have inventories records for the VCPU resource class, which is
    assumed to only come from the ResourceTracker in compute nodes.
    """
    # TODO(mriedem): If/when we support a separate placement database this
    # will need to change to just use the REST API.

    # Get the VCPU resource class ID for filtering.
    vcpu_rc_id = fields.ResourceClass.STANDARD.index(
        fields.ResourceClass.VCPU)

    # The inventories table has a unique constraint per resource provider
    # and resource class, so we can simply count the number of inventories
    # records for the given resource class and those will uniquely identify
    # the number of resource providers we care about.
    meta = MetaData(bind=db_session.get_api_engine())
    inventories = Table('inventories', meta, autoload=True)
    is_vcpu = inventories.c.resource_class_id == vcpu_rc_id
    query = select([sqlfunc.count()]).select_from(inventories).where(is_vcpu)
    return query.scalar()
def _count_compute_resource_providers():
    """Returns the number of compute resource providers in the API database

    The resource provider count is filtered based on resource providers
    which have inventories records for the VCPU resource class, which is
    assumed to only come from the ResourceTracker in compute nodes.
    """
    # TODO(mriedem): If/when we support a separate placement database this
    # will need to change to just use the REST API.

    # Get the VCPU resource class ID for filtering.
    rc_index = fields.ResourceClass.STANDARD.index(fields.ResourceClass.VCPU)

    # The inventories table has a unique constraint per resource provider
    # and resource class, so we can simply count the number of inventories
    # records for the given resource class and those will uniquely identify
    # the number of resource providers we care about.
    metadata = MetaData(bind=db_session.get_api_engine())
    inventories_table = Table('inventories', metadata, autoload=True)
    return select([sqlfunc.count()]).select_from(inventories_table).where(
        inventories_table.c.resource_class_id == rc_index).scalar()
def get_engine(database='main'):
    """Return the engine for the main or api database.

    :param database: 'main' or 'api'
    :returns: the engine, or None for any other database name
    """
    if database == 'api':
        return db_session.get_api_engine()
    if database == 'main':
        return db_session.get_engine()
def _check_console_auths(self):
    """Checks for console usage and warns with info for rolling upgrade.

    Iterates all cells checking to see if the nova-consoleauth service
    is non-deleted/non-disabled and whether there are any console token
    auths in that cell database. If there is a nova-consoleauth service
    being used and no console token auths in the cell database, emit a
    warning telling the user to set
    [workarounds]enable_consoleauth = True if they are performing a
    rolling upgrade.

    :returns: an upgradecheck.Result; WARNING when consoles are in use
        and a cell has pre-Rocky computes (or no cell mappings exist),
        SUCCESS otherwise.
    """
    # If we're using cells v1, we don't need to check if the workaround
    # needs to be used because cells v1 always uses nova-consoleauth.
    # If the operator has already enabled the workaround, we don't need
    # to check anything.
    if CONF.cells.enable or CONF.workarounds.enable_consoleauth:
        return upgradecheck.Result(upgradecheck.Code.SUCCESS)

    # We need to check cell0 for nova-consoleauth service records because
    # it's possible a deployment could have services stored in the cell0
    # database, if they've defaulted their [database]connection in
    # nova.conf to cell0.
    meta = MetaData(bind=db_session.get_api_engine())
    cell_mappings = Table('cell_mappings', meta, autoload=True)
    mappings = cell_mappings.select().execute().fetchall()
    if not mappings:
        # There are no cell mappings so we can't determine this, just
        # return a warning. The cellsv2 check would have already failed
        # on this.
        msg = (_('Unable to check consoles without cell mappings.'))
        return upgradecheck.Result(upgradecheck.Code.WARNING, msg)

    ctxt = nova_context.get_admin_context()
    # If we find a non-deleted, non-disabled nova-consoleauth service in
    # any cell, we will assume the deployment is using consoles.
    using_consoles = False
    for mapping in mappings:
        with nova_context.target_cell(ctxt, mapping) as cctxt:
            # Check for any non-deleted, non-disabled nova-consoleauth
            # service.
            meta = MetaData(bind=db_session.get_engine(context=cctxt))
            services = Table('services', meta, autoload=True)
            consoleauth_service_record = (
                select([services.c.id]).select_from(services).where(and_(
                    services.c.binary == 'nova-consoleauth',
                    services.c.deleted == 0,
                    services.c.disabled == false())).execute().first())
            if consoleauth_service_record:
                using_consoles = True
                break

    if using_consoles:
        # If the deployment is using consoles, we can only be certain the
        # upgrade is complete if each compute service is >= Rocky and
        # supports storing console token auths in the database backend.
        for mapping in mappings:
            # Skip cell0 as no compute services should be in it.
            if mapping['uuid'] == cell_mapping_obj.CellMapping.CELL0_UUID:
                continue
            # Get the minimum nova-compute service version in this
            # cell.
            with nova_context.target_cell(ctxt, mapping) as cctxt:
                min_version = self._get_min_service_version(
                    cctxt, 'nova-compute')
            # We could get None for the minimum version in the case of
            # new install where there are no computes. If there are
            # compute services, they should all have versions.
            if min_version is not None and min_version < 35:
                msg = _("One or more cells were found which have "
                        "nova-compute services older than Rocky. "
                        "Please set the "
                        "'[workarounds]enable_consoleauth' "
                        "configuration option to 'True' on your "
                        "console proxy host if you are performing a "
                        "rolling upgrade to enable consoles to "
                        "function during a partial upgrade.")
                return upgradecheck.Result(upgradecheck.Code.WARNING, msg)

    return upgradecheck.Result(upgradecheck.Code.SUCCESS)
def _check_console_auths(self):
    """Checks for console usage and warns with info for rolling upgrade.

    Iterates all cells checking to see if the nova-consoleauth service
    is non-deleted/non-disabled and whether there are any console token
    auths in that cell database. If there is a nova-consoleauth service
    being used and no console token auths in the cell database, emit a
    warning telling the user to set
    [workarounds]enable_consoleauth = True if they are performing a
    rolling upgrade.

    :returns: an upgradecheck.Result; WARNING when consoles are in use
        and a cell has pre-Rocky computes (or no cell mappings exist),
        SUCCESS otherwise.
    """
    # If we're using cells v1, we don't need to check if the workaround
    # needs to be used because cells v1 always uses nova-consoleauth.
    # If the operator has already enabled the workaround, we don't need
    # to check anything.
    if CONF.cells.enable or CONF.workarounds.enable_consoleauth:
        return upgradecheck.Result(upgradecheck.Code.SUCCESS)

    # We need to check cell0 for nova-consoleauth service records because
    # it's possible a deployment could have services stored in the cell0
    # database, if they've defaulted their [database]connection in
    # nova.conf to cell0.
    meta = MetaData(bind=db_session.get_api_engine())
    cell_mappings = Table('cell_mappings', meta, autoload=True)
    mappings = cell_mappings.select().execute().fetchall()
    if not mappings:
        # There are no cell mappings so we can't determine this, just
        # return a warning. The cellsv2 check would have already failed
        # on this.
        msg = (_('Unable to check consoles without cell mappings.'))
        return upgradecheck.Result(upgradecheck.Code.WARNING, msg)

    ctxt = nova_context.get_admin_context()
    # If we find a non-deleted, non-disabled nova-consoleauth service in
    # any cell, we will assume the deployment is using consoles.
    using_consoles = False
    for mapping in mappings:
        with nova_context.target_cell(ctxt, mapping) as cctxt:
            # Check for any non-deleted, non-disabled nova-consoleauth
            # service.
            meta = MetaData(bind=db_session.get_engine(context=cctxt))
            services = Table('services', meta, autoload=True)
            consoleauth_service_record = (select([
                services.c.id
            ]).select_from(services).where(
                and_(services.c.binary == 'nova-consoleauth',
                     services.c.deleted == 0,
                     services.c.disabled == false())).execute().first())
            if consoleauth_service_record:
                using_consoles = True
                break

    if using_consoles:
        # If the deployment is using consoles, we can only be certain the
        # upgrade is complete if each compute service is >= Rocky and
        # supports storing console token auths in the database backend.
        for mapping in mappings:
            # Skip cell0 as no compute services should be in it.
            if mapping['uuid'] == cell_mapping_obj.CellMapping.CELL0_UUID:
                continue
            # Get the minimum nova-compute service version in this
            # cell.
            with nova_context.target_cell(ctxt, mapping) as cctxt:
                min_version = self._get_min_service_version(
                    cctxt, 'nova-compute')
            # We could get None for the minimum version in the case of
            # new install where there are no computes. If there are
            # compute services, they should all have versions.
            if min_version is not None and min_version < 35:
                msg = _("One or more cells were found which have "
                        "nova-compute services older than Rocky. "
                        "Please set the "
                        "'[workarounds]enable_consoleauth' "
                        "configuration option to 'True' on your "
                        "console proxy host if you are performing a "
                        "rolling upgrade to enable consoles to "
                        "function during a partial upgrade.")
                return upgradecheck.Result(upgradecheck.Code.WARNING, msg)

    return upgradecheck.Result(upgradecheck.Code.SUCCESS)
def get_engine(database='main', context=None):
    """Return the engine for the main or api database.

    :param database: 'main' or 'api'
    :param context: request context, forwarded only for the main database
    :returns: the engine, or None for any other database name
    """
    if database == 'main':
        return db_session.get_engine(context=context)
    elif database == 'api':
        return db_session.get_api_engine()