def test_auth_middleware_factory(self):
    """Make sure that configuration settings make their way to
    the keystone middleware correctly.
    """
    config = cfg.ConfigOpts()
    conf_fixture = self.useFixture(config_fixture.Config(config))
    conf.register_opts(conf_fixture.conf)
    # NOTE(cdent): There appears to be no simple way to get the list of
    # options used by the auth_token middleware. So we pull from an
    # existing data structure.
    auth_token_opts = auth_token.AUTH_TOKEN_OPTS[0][1]
    conf_fixture.register_opts(
        auth_token_opts, group='keystone_authtoken')
    www_authenticate_uri = 'http://example.com/identity'
    conf_fixture.config(www_authenticate_uri=www_authenticate_uri,
                        group='keystone_authtoken')
    # Ensure that the auth_token middleware is chosen.
    conf_fixture.config(auth_strategy='keystone', group='api')
    # Register and set default policy opts (referenced by deploy).
    policy_opts.set_defaults(conf_fixture.conf)

    app = deploy.deploy(conf_fixture.conf)
    req = webob.Request.blank('/resource_providers', method="GET")

    response = req.get_response(app)

    auth_header = response.headers['www-authenticate']
    self.assertIn(www_authenticate_uri, auth_header)
    self.assertIn('keystone uri=', auth_header.lower())

def setUp(self):
    super(PlacementFixture, self).setUp()
    if not self.conf_fixture:
        config = cfg.ConfigOpts()
        self.conf_fixture = self.useFixture(config_fixture.Config(config))
    if self.register_opts:
        conf.register_opts(self.conf_fixture.conf)

    if self.db:
        self.useFixture(
            db_fixture.Database(self.conf_fixture, set_config=True))
    policy_opts.set_defaults(self.conf_fixture.conf)
    self.conf_fixture.config(group='api', auth_strategy='noauth2')
    self.conf_fixture.conf([], default_config_files=[])

    self.useFixture(policy_fixture.PolicyFixture(self.conf_fixture))

    if self.use_intercept:
        loader = deploy.loadapp(self.conf_fixture.conf)

        def app():
            return loader

        self.endpoint = 'http://%s/placement' % uuidutils.generate_uuid()
        intercept = interceptor.RequestsInterceptor(app, url=self.endpoint)
        intercept.install_intercept()
        self.addCleanup(intercept.uninstall_intercept)

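# NOTE: A minimal sketch of how a test might consume the fixture above;
# the test class and the use of the requests library against the
# intercepted endpoint are illustrative assumptions, not part of the
# fixture itself.
import requests
import testtools

class ExampleTest(testtools.TestCase):
    def test_root(self):
        placement = self.useFixture(PlacementFixture())
        # wsgi-intercept routes requests for the fixture's endpoint to
        # the in-process placement app; no real network is involved.
        resp = requests.get(placement.endpoint)
        self.assertEqual(200, resp.status_code)
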
def init_application():
    # initialize the config system
    conffiles = _get_config_files()
    config = cfg.ConfigOpts()
    conf.register_opts(config)

    # This will raise cfg.RequiredOptError when a required option is not set
    # (notably the database connection string). We want this to be a hard
    # fail that prevents the application from starting. The error will show
    # up in the wsgi server's logs.
    _parse_args(config, [], default_config_files=conffiles)

    # initialize the logging system
    setup_logging(config)

    # configure database
    db_api.configure(config)

    # dump conf at debug if log_options
    if config.log_options:
        config.log_opt_values(logging.getLogger(__name__), logging.DEBUG)

    setup_profiler(config)

    # build and return our WSGI app
    return deploy.loadapp(config)

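# NOTE: A sketch of how a WSGI server (e.g. uwsgi, gunicorn, or
# mod_wsgi) might load the factory above; the assumption is a module
# that exposes a module-level 'application' callable at import time.
application = init_application()
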
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    try:
        connectable = placement_db.get_placement_engine()
    except db_exc.CantStartEngineError:
        # We are being called from a context where the database hasn't been
        # configured so we need to set up Config and config the database.
        # This is usually the alembic command line.
        config = cfg.ConfigOpts()
        conf.register_opts(config)
        config([], project="placement", default_config_files=None)
        placement_db.configure(config)
        connectable = placement_db.get_placement_engine()

    with connectable.connect() as connection:
        context.configure(connection=connection,
                          target_metadata=target_metadata)

        with context.begin_transaction():
            context.run_migrations()

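# NOTE: A sketch of the standard alembic env.py dispatch that would
# invoke the function above; run_migrations_offline is part of the
# default alembic template and is assumed to be defined elsewhere in
# env.py.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
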
def start_fixture(self):
    global CONF
    # Set up stderr and stdout captures by directly driving the
    # existing nova fixtures that do that. This captures the
    # output that happens outside individual tests (for
    # example database migrations).
    self.standard_logging_fixture = capture.Logging()
    self.standard_logging_fixture.setUp()
    self.output_stream_fixture = output.CaptureOutput()
    self.output_stream_fixture.setUp()
    self.logging_error_fixture = (
        logging_error.get_logging_handle_error_fixture())
    self.logging_error_fixture.setUp()
    # Filter ignorable warnings during test runs.
    self.warnings_fixture = capture.WarningsFixture()
    self.warnings_fixture.setUp()

    # Do not use global CONF.
    self.conf_fixture = config_fixture.Config(cfg.ConfigOpts())
    self.conf_fixture.setUp()
    conf.register_opts(self.conf_fixture.conf)
    self.conf_fixture.config(group='api', auth_strategy='noauth2')

    self.placement_db_fixture = fixtures.Database(
        self.conf_fixture, set_config=True)
    self.placement_db_fixture.setUp()

    self.context = context.RequestContext()

    # Some database interaction methods require access to the oslo config
    # via the context. Within the WSGI application this is taken care of
    # but here in the fixtures we use some of those methods to create
    # entities.
    self.context.config = self.conf_fixture.conf

    # Set default policy opts, otherwise the deploy module can raise
    # NoSuchOptError.
    policy_opts.set_defaults(self.conf_fixture.conf)

    # Make sure default_config_files is an empty list, not None. If None,
    # /etc/placement/placement.conf is read and confuses results.
    self.conf_fixture.conf([], default_config_files=[])

    # Turn on a policy fixture.
    self.policy_fixture = policy_fixture.PolicyFixture(self.conf_fixture)
    self.policy_fixture.setUp()

    os.environ['RP_UUID'] = uuidutils.generate_uuid()
    os.environ['RP_NAME'] = uuidutils.generate_uuid()
    os.environ['CUSTOM_RES_CLASS'] = 'CUSTOM_IRON_NFV'
    os.environ['PROJECT_ID'] = uuidutils.generate_uuid()
    os.environ['USER_ID'] = uuidutils.generate_uuid()
    os.environ['PROJECT_ID_ALT'] = uuidutils.generate_uuid()
    os.environ['USER_ID_ALT'] = uuidutils.generate_uuid()
    os.environ['INSTANCE_UUID'] = uuidutils.generate_uuid()
    os.environ['MIGRATION_UUID'] = uuidutils.generate_uuid()
    os.environ['CONSUMER_UUID'] = uuidutils.generate_uuid()
    os.environ['PARENT_PROVIDER_UUID'] = uuidutils.generate_uuid()
    os.environ['ALT_PARENT_PROVIDER_UUID'] = uuidutils.generate_uuid()

    CONF = self.conf_fixture.conf

def start_fixture(self):
    global CONF
    # Set up stderr and stdout captures by directly driving the
    # existing nova fixtures that do that. This captures the
    # output that happens outside individual tests (for
    # example database migrations).
    self.standard_logging_fixture = capture.Logging()
    self.standard_logging_fixture.setUp()
    self.output_stream_fixture = output.CaptureOutput()
    self.output_stream_fixture.setUp()
    self.logging_error_fixture = (
        logging_error.get_logging_handle_error_fixture())
    self.logging_error_fixture.setUp()
    # Filter ignorable warnings during test runs.
    self.warnings_fixture = capture.WarningsFixture()
    self.warnings_fixture.setUp()

    # Do not use global CONF.
    self.conf_fixture = config_fixture.Config(cfg.ConfigOpts())
    self.conf_fixture.setUp()
    conf.register_opts(self.conf_fixture.conf)
    self.conf_fixture.config(group='api', auth_strategy='noauth2')

    self.placement_db_fixture = fixtures.Database(
        self.conf_fixture, set_config=True)
    self.placement_db_fixture.setUp()

    self.context = context.RequestContext()

    # Register CORS opts, but do not set config. This has the
    # effect of exercising the "don't use cors" path in
    # deploy.py. Without setting some config the group will not
    # be present.
    self.conf_fixture.register_opts(cors.CORS_OPTS, 'cors')

    # Set default policy opts, otherwise the deploy module can raise
    # NoSuchOptError.
    policy_opts.set_defaults(self.conf_fixture.conf)

    # Make sure default_config_files is an empty list, not None. If None,
    # /etc/placement/placement.conf is read and confuses results.
    self.conf_fixture.conf([], default_config_files=[])

    # Turn on a policy fixture.
    self.policy_fixture = policy_fixture.PolicyFixture(self.conf_fixture)
    self.policy_fixture.setUp()

    os.environ['RP_UUID'] = uuidutils.generate_uuid()
    os.environ['RP_NAME'] = uuidutils.generate_uuid()
    os.environ['CUSTOM_RES_CLASS'] = 'CUSTOM_IRON_NFV'
    os.environ['PROJECT_ID'] = uuidutils.generate_uuid()
    os.environ['USER_ID'] = uuidutils.generate_uuid()
    os.environ['PROJECT_ID_ALT'] = uuidutils.generate_uuid()
    os.environ['USER_ID_ALT'] = uuidutils.generate_uuid()
    os.environ['INSTANCE_UUID'] = uuidutils.generate_uuid()
    os.environ['MIGRATION_UUID'] = uuidutils.generate_uuid()
    os.environ['CONSUMER_UUID'] = uuidutils.generate_uuid()
    os.environ['PARENT_PROVIDER_UUID'] = uuidutils.generate_uuid()
    os.environ['ALT_PARENT_PROVIDER_UUID'] = uuidutils.generate_uuid()

    CONF = self.conf_fixture.conf

def setUp(self):
    super(TestCase, self).setUp()
    self.user_id = 'fake-user'
    self.project_id = 'fake-project'
    self.context = context.RequestContext(self.user_id, self.project_id)
    config = cfg.ConfigOpts()
    self.conf_fixture = self.useFixture(config_fixture.Config(config))
    conf.register_opts(config)
    self.context.config = config

def setUp(self):
    super(TestDBCommands, self).setUp()
    self.conf = cfg.ConfigOpts()
    conf_fixture = config_fixture.Config(self.conf)
    self.useFixture(conf_fixture)
    conf.register_opts(conf_fixture.conf)
    conf_fixture.config(
        group="placement_database", connection='sqlite://')
    command_opts = manage.setup_commands(conf_fixture)
    conf_fixture.register_cli_opts(command_opts)
    self.output = self.useFixture(
        output.CaptureOutput(do_stderr=True, do_stdout=True))

def _parse_args(config, argv, default_config_files):
    # register placement's config options
    conf.register_opts(config)

    if profiler:
        profiler.set_defaults(config)

    _set_middleware_defaults()

    config(argv[1:], project='placement',
           version=version_info.version_string(),
           default_config_files=default_config_files)

def setUp(self):
    super(PlacementPolicyTestCase, self).setUp()
    config = cfg.ConfigOpts()
    self.conf_fixture = self.useFixture(config_fixture.Config(config))
    conf.register_opts(config)
    self.ctxt = context.RequestContext(user_id='fake', project_id='fake')
    self.target = {'user_id': 'fake', 'project_id': 'fake'}
    # A value is required in the database connection opt for conf to
    # parse.
    self.conf_fixture.config(
        connection='stub', group='placement_database')
    config([], default_config_files=[])
    self.ctxt.config = config
    policy.reset()
    self.addCleanup(policy.reset)

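# NOTE: A sketch of a test that could follow this setUp; the rule name
# and the policy.authorize call signature are assumptions made for
# illustration only.
def test_authorize(self):
    self.assertTrue(
        policy.authorize(
            self.ctxt, 'placement:resource_providers:list', self.target))
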
def setUp(self):
    super(TestEnsureConsumer, self).setUp()
    self.conf = cfg.ConfigOpts()
    self.useFixture(config_fixture.Config(self.conf))
    conf.register_opts(self.conf)
    self.mock_project_get = self.useFixture(fixtures.MockPatch(
        'placement.objects.project.Project.get_by_external_id')).mock
    self.mock_user_get = self.useFixture(fixtures.MockPatch(
        'placement.objects.user.User.get_by_external_id')).mock
    self.mock_consumer_get = self.useFixture(fixtures.MockPatch(
        'placement.objects.consumer.Consumer.get_by_uuid')).mock
    self.mock_project_create = self.useFixture(fixtures.MockPatch(
        'placement.objects.project.Project.create')).mock
    self.mock_user_create = self.useFixture(fixtures.MockPatch(
        'placement.objects.user.User.create')).mock
    self.mock_consumer_create = self.useFixture(fixtures.MockPatch(
        'placement.objects.consumer.Consumer.create')).mock
    self.mock_consumer_update = self.useFixture(fixtures.MockPatch(
        'placement.objects.consumer.Consumer.update')).mock
    self.ctx = context.RequestContext(user_id='fake', project_id='fake')
    self.ctx.config = self.conf
    self.consumer_id = uuidsentinel.consumer
    self.project_id = uuidsentinel.project
    self.user_id = uuidsentinel.user

    mv_parsed = microversion_parse.Version(1, 27)
    mv_parsed.max_version = microversion_parse.parse_version_string(
        microversion.max_version_string())
    mv_parsed.min_version = microversion_parse.parse_version_string(
        microversion.min_version_string())
    self.before_version = mv_parsed

    mv_parsed = microversion_parse.Version(1, 28)
    mv_parsed.max_version = microversion_parse.parse_version_string(
        microversion.max_version_string())
    mv_parsed.min_version = microversion_parse.parse_version_string(
        microversion.min_version_string())
    self.after_version = mv_parsed

    mv_parsed = microversion_parse.Version(1, 38)
    mv_parsed.max_version = microversion_parse.parse_version_string(
        microversion.max_version_string())
    mv_parsed.min_version = microversion_parse.parse_version_string(
        microversion.min_version_string())
    self.cons_type_req_version = mv_parsed

def main():
    # Set up the configuration to configure the database.
    config = cfg.ConfigOpts()
    conf.register_opts(config)
    # Register cli opts before parsing args.
    upgradecheck.register_cli_options(config, Checks(config))
    # A slice of sys.argv is provided to pass the command line
    # arguments for processing, without the name of the calling
    # script ('placement-status'). If we were using
    # upgradecheck.main() directly, it would do it for us, but
    # we do not because of the need to configure the database
    # first.
    config(args=sys.argv[1:], project='placement')
    db_api.configure(config)
    return upgradecheck.run(config)

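# NOTE: A sketch of the Checks class consumed above, assuming the
# oslo.upgradecheck UpgradeCommands API; the real upgrade checks live
# elsewhere in placement and the check below is purely illustrative.
from oslo_upgradecheck import upgradecheck

class Checks(upgradecheck.UpgradeCommands):
    def __init__(self, config):
        self.config = config

    def _check_placeholder(self):
        # Always succeeds; a real check would inspect the database.
        return upgradecheck.Result(upgradecheck.Code.SUCCESS)

    _upgrade_checks = (('Placeholder', _check_placeholder),)
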
def setUp(self):
    super(TestCommandParsers, self).setUp()
    self.conf = cfg.ConfigOpts()
    conf_fixture = config_fixture.Config(self.conf)
    self.useFixture(conf_fixture)
    conf.register_opts(conf_fixture.conf)
    # Quiet output from argparse (used within oslo_config).
    # If you are debugging, commenting this out might be useful.
    self.output = self.useFixture(
        output.CaptureOutput(do_stderr=True, do_stdout=True))
    # We don't use a database, but we need to set the opt as
    # it's required for a valid config.
    conf_fixture.config(
        group="placement_database", connection='sqlite://')
    command_opts = manage.setup_commands(conf_fixture)
    # Command line opts must be registered on the conf_fixture, otherwise
    # they carry over globally.
    conf_fixture.register_cli_opts(command_opts)

def setUp(self): """Establish config defaults for middlewares.""" super(RootNoAuth, self).setUp() config = cfg.ConfigOpts() conf_fixture = self.useFixture(config_fixture.Config(config)) conf.register_opts(conf_fixture.conf) auth_token_opts = auth_token.AUTH_TOKEN_OPTS[0][1] conf_fixture.register_opts(auth_token_opts, group='keystone_authtoken') www_authenticate_uri = 'http://example.com/identity' conf_fixture.config(www_authenticate_uri=www_authenticate_uri, group='keystone_authtoken') # ensure that the auth_token middleware is chosen conf_fixture.config(auth_strategy='keystone', group='api') # register and default policy opts (referenced by deploy) policy_opts.set_defaults(conf_fixture.conf) self.conf = conf_fixture.conf self.app = deploy.deploy(self.conf)
def _parse_args(config, argv, default_config_files):
    # register placement's config options
    conf.register_opts(config)

    if profiler:
        profiler.set_defaults(config)

    _set_middleware_defaults()

    # This is needed so we can check [oslo_policy]/enforce_scope in the
    # deploy module.
    policy_opts.set_defaults(config)

    config(argv[1:], project='placement',
           version=version_info.version_string(),
           default_config_files=default_config_files)

def main():
    config = cfg.ConfigOpts()
    conf.register_opts(config)
    command_opts = setup_commands(config)
    config.register_cli_opts(command_opts)
    config(sys.argv[1:], project='placement',
           version=version_info.version_string(),
           default_config_files=None)
    db_api.configure(config)

    try:
        func = config.command.func
        return_code = func()
        # If return_code ends up None we assume 0.
        sys.exit(return_code or 0)
    except cfg.NoSuchOptError:
        config.print_help()
        sys.exit(1)

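# NOTE: A sketch of the setup_commands helper called above, assuming
# oslo.config's SubCommandOpt API; the sub-command and handler below are
# illustrative, not the real placement-manage command set.
def _add_parsers(subparsers):
    db_parser = subparsers.add_parser('db', help='database commands')
    # config.command.func picks this up after argument parsing.
    db_parser.set_defaults(func=lambda: 0)  # hypothetical no-op handler

def setup_commands(config):
    return [cfg.SubCommandOpt('command', title='Commands',
                              handler=_add_parsers)]
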
def setUp(self):
    super(TestCase, self).setUp()

    # Manage required configuration.
    self.conf_fixture = self.useFixture(
        config_fixture.Config(cfg.ConfigOpts()))
    conf.register_opts(self.conf_fixture.conf)
    self.placement_db = self.useFixture(
        fixtures.Database(self.conf_fixture, set_config=True))
    self.conf_fixture.conf([], default_config_files=[])

    self.useFixture(policy_fixture.PolicyFixture(self.conf_fixture))

    self.useFixture(capture.Logging())
    self.useFixture(output.CaptureOutput())
    # Filter ignorable warnings during test runs.
    self.useFixture(capture.WarningsFixture())
    self.useFixture(logging_error.get_logging_handle_error_fixture())

    self.context = context.RequestContext()
    self.context.config = self.conf_fixture.conf

def setUp(self):
    super(DbApiTests, self).setUp()
    config = cfg.ConfigOpts()
    self.conf_fixture = self.useFixture(config_fixture.Config(config))
    conf.register_opts(self.conf_fixture.conf)
    db_api.configure.reset()

def setUp(self):
    super(TestDirect, self).setUp()
    self.conf = cfg.ConfigOpts()
    conf.register_opts(self.conf)
    policy_opts.set_defaults(self.conf)

def setUp(self):
    super(UpgradeCheckIncompleteConsumersTestCase, self).setUp()
    config = cfg.ConfigOpts()
    conf.register_opts(config)
    self.checks = status.Checks(config)

def test_local_delete_removes_allocations_after_compute_restart(self):
    """Tests that allocations are removed after a local delete.

    This tests the scenario where a server is local deleted (because
    the compute host is down) and we want to make sure that its
    allocations have been cleaned up once the nova-compute service
    restarts.

    In this scenario we conditionally use the PlacementFixture to
    simulate the case that nova-api isn't configured to talk to
    placement, thus we need to manage the placement database
    independently.
    """
    config = cfg.ConfigOpts()
    placement_config = self.useFixture(config_fixture.Config(config))
    placement_conf.register_opts(config)
    self.useFixture(
        placement_db.Database(placement_config, set_config=True))
    # Get allocations, make sure they are 0.
    with func_fixtures.PlacementFixture(
            conf_fixture=placement_config, db=False) as placement:
        compute = self.start_service('compute')
        placement_api = placement.api
        resp = placement_api.get('/resource_providers')
        rp_uuid = resp.body['resource_providers'][0]['uuid']
        usages_before = self._get_usages(placement_api, rp_uuid)
        for usage in usages_before.values():
            self.assertEqual(0, usage)

        # Create a server.
        server = self._build_minimal_create_server_request(
            self.api, 'local-delete-test', self.image_id, self.flavor_id,
            'none')
        server = self.admin_api.post_server({'server': server})
        server = self._wait_for_state_change(self.api, server, 'ACTIVE')

        # Assert usages are non zero now.
        usages_during = self._get_usages(placement_api, rp_uuid)
        for usage in usages_during.values():
            self.assertNotEqual(0, usage)

        # Force-down compute to trigger local delete.
        compute.stop()
        compute_service_id = self.admin_api.get_services(
            host=compute.host, binary='nova-compute')[0]['id']
        self.admin_api.put_service(compute_service_id,
                                   {'forced_down': True})

        # Delete the server (will be a local delete because compute
        # is down).
        self.api.delete_server(server['id'])
        self._wait_until_deleted(server)

    with func_fixtures.PlacementFixture(
            conf_fixture=placement_config, db=False) as placement:
        placement_api = placement.api
        # Assert usages are still non-zero.
        usages_during = self._get_usages(placement_api, rp_uuid)
        for usage in usages_during.values():
            self.assertNotEqual(0, usage)

        # Start the compute service again. Before it comes up, it will
        # call the update_available_resource code in the ResourceTracker
        # which is what "heals" the allocations for the deleted instance.
        compute.start()

        # Get the allocations again to check against the original.
        usages_after = self._get_usages(placement_api, rp_uuid)

    # They should match.
    self.assertEqual(usages_before, usages_after)

def setUp(self):
    super(TestPlacementDBConf, self).setUp()
    config = cfg.ConfigOpts()
    self.conf_fixture = self.useFixture(config_fixture.Config(config))
    conf.register_opts(config)

def setUp(self):
    super(UpgradeCheckIncompleteConsumersTestCase, self).setUp()
    config = cfg.ConfigOpts()
    conf.register_opts(config)
    config(args=[], project='placement')
    self.checks = status.Checks(config)