def setUp(self):
    """Install the global oslo config fixture and the member flows under test."""
    super(TestMemberFlows, self).setUp()
    config_fix = oslo_fixture.Config(cfg.CONF)
    self.conf = self.useFixture(config_fix)
    self.flows = a10_member_flows.MemberFlows()
Example #2
0
 def setUp(self):
     """Build the LVS templater and point haproxy_amphora at the test path."""
     super().setUp()
     self.lvs_jinja_cfg = jinja_cfg.LvsJinjaTemplater()
     # Register the config fixture via useFixture so the base_path override
     # is reverted on teardown; the original created the fixture without
     # setting it up, leaking the override into subsequent tests.
     conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
     conf.config(group="haproxy_amphora", base_path=BASE_PATH)
Example #3
0
    def setUp(self):
        """Run before each test method to initialize test environment.

        Installs the fixtures every test depends on (timeout, temp dirs,
        output/log capture, locking, config, RPC, database/cells, mox,
        policy) and resets assorted module-level caches so state cannot
        leak between tests.
        """
        super(TestCase, self).setUp()
        # Abort tests that exceed OS_TEST_TIMEOUT, scaled per test class.
        self.useFixture(
            nova_fixtures.Timeout(os.environ.get('OS_TEST_TIMEOUT', 0),
                                  self.TIMEOUT_SCALING_FACTOR))

        # How many of which service we've started. {$service-name: $count}
        self._service_fixture_count = collections.defaultdict(int)

        self.useFixture(nova_fixtures.OpenStackSDKFixture())

        # Isolate temp files and HOME so tests cannot touch the real ones.
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())
        self.useFixture(log_fixture.get_logging_handle_error_fixture())

        # Capture stdout/stderr and logging; kept as attributes so tests
        # can assert on captured output.
        self.output = nova_fixtures.OutputStreamCapture()
        self.useFixture(self.output)

        self.stdlog = nova_fixtures.StandardLogging()
        self.useFixture(self.stdlog)

        # NOTE(sdague): because of the way we were using the lock
        # wrapper we ended up with a lot of tests that started
        # relying on global external locking being set up for them. We
        # consider all of these to be *bugs*. Tests should not require
        # global external locking, or if they do, they should
        # explicitly set it up themselves.
        #
        # The following REQUIRES_LOCKING class parameter is provided
        # as a bridge to get us there. No new tests should be added
        # that require it, and existing classes and tests should be
        # fixed to not need it.
        if self.REQUIRES_LOCKING:
            lock_path = self.useFixture(fixtures.TempDir()).path
            self.fixture = self.useFixture(
                config_fixture.Config(lockutils.CONF))
            self.fixture.config(lock_path=lock_path, group='oslo_concurrency')

        self.useFixture(conf_fixture.ConfFixture(CONF))

        if self.STUB_RPC:
            self.useFixture(nova_fixtures.RPCFixture('nova.test'))

            # we cannot set this in the ConfFixture as oslo only registers the
            # notification opts at the first instantiation of a Notifier that
            # happens only in the RPCFixture
            CONF.set_default('driver', ['test'],
                             group='oslo_messaging_notifications')

        # NOTE(danms): Make sure to reset us back to non-remote objects
        # for each test to avoid interactions. Also, backup the object
        # registry.
        objects_base.NovaObject.indirection_api = None
        self._base_test_obj_backup = copy.copy(
            objects_base.NovaObjectRegistry._registry._obj_classes)
        self.addCleanup(self._restore_obj_registry)
        objects.Service.clear_min_version_cache()

        # NOTE(danms): Reset the cached list of cells
        from nova.compute import api
        api.CELLS = []
        context.CELL_CACHE = {}
        context.CELLS = []

        self.cell_mappings = {}
        self.host_mappings = {}
        # NOTE(danms): If the test claims to want to set up the database
        # itself, then it is responsible for all the mapping stuff too.
        if self.USES_DB:
            # NOTE(danms): Full database setup involves a cell0, cell1,
            # and the relevant mappings.
            self.useFixture(nova_fixtures.Database(database='api'))
            self._setup_cells()
            self.useFixture(nova_fixtures.DefaultFlavorsFixture())
        elif not self.USES_DB_SELF:
            # NOTE(danms): If not using the database, we mock out the
            # mapping stuff and effectively collapse everything to a
            # single cell.
            self.useFixture(nova_fixtures.SingleCellSimple())
            self.useFixture(nova_fixtures.DatabasePoisonFixture())

        # NOTE(blk-u): WarningsFixture must be after the Database fixture
        # because sqlalchemy-migrate messes with the warnings filters.
        self.useFixture(nova_fixtures.WarningsFixture())

        self.useFixture(ovo_fixture.StableObjectJsonFixture())

        # NOTE(mnaser): All calls to utils.is_neutron() are cached in
        # nova.utils._IS_NEUTRON.  We set it to None to avoid any
        # caching of that value.
        utils._IS_NEUTRON = None

        # Reset the global QEMU version flag.
        images.QEMU_VERSION = None

        # Reset the compute RPC API globals (mostly the _ROUTER).
        compute_rpcapi.reset_globals()

        # TODO(takashin): Remove MoxStubout fixture
        # after removing tests which uses mox and are related to
        # nova-network in the following files.
        #
        # - nova/tests/unit/api/openstack/compute/test_floating_ips.py
        # - nova/tests/unit/api/openstack/compute/test_security_groups.py
        # - nova/tests/unit/fake_network.py
        # - nova/tests/unit/network/test_manager.py
        mox_fixture = self.useFixture(moxstubout.MoxStubout())
        self.mox = mox_fixture.mox
        self.stubs = mox_fixture.stubs
        self.addCleanup(self._clear_attrs)
        # Ensure a stray http_proxy env var cannot affect test HTTP traffic.
        self.useFixture(fixtures.EnvironmentVariable('http_proxy'))
        self.policy = self.useFixture(policy_fixture.PolicyFixture())

        self.useFixture(nova_fixtures.PoisonFunctions())

        # Clear the module-level driver cache between tests.
        openstack_driver.DRIVER_CACHE = {}

        self.useFixture(nova_fixtures.ForbidNewLegacyNotificationFixture())

        # NOTE(mikal): make sure we don't load a privsep helper accidentally
        self.useFixture(nova_fixtures.PrivsepNoHelperFixture())
        self.useFixture(mock_fixture.MockAutospecFixture())

        # FIXME(danms): Disable this for all tests by default to avoid breaking
        # any that depend on default/previous ordering
        self.flags(build_failure_weight_multiplier=0.0,
                   group='filter_scheduler')

        # NOTE(melwitt): Reset the cached set of projects
        quota.UID_QFD_POPULATED_CACHE_BY_PROJECT = set()
        quota.UID_QFD_POPULATED_CACHE_ALL = False
Example #4
0
    def setUp(self):
        """Run before each test method to initialize test environment.

        Sets up notifications, timeouts, temp dirs, output/log capture,
        messaging, the shared sqlite database, object registry backups,
        mox stubs, locking, policy, and the coordination backend, and
        clears module-level caches so tests stay isolated.
        """
        super(TestCase, self).setUp()

        # Create default notifier
        self.notifier = fake_notifier.get_fake_notifier()

        # Mock rpc get notifier with fake notifier method that joins all
        # notifications with the default notifier
        p = mock.patch('cinder.rpc.get_notifier',
                       side_effect=self._get_joined_notifier)
        p.start()

        # Unit tests do not need to use lazy gettext
        i18n.enable_lazy(False)

        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        # Isolate temp files and HOME from the real environment.
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        # Optionally capture stdout/stderr, controlled by env vars.
        environ_enabled = (lambda var_name:
                           strutils.bool_from_string(os.environ.get(var_name)))
        if environ_enabled('OS_STDOUT_CAPTURE'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if environ_enabled('OS_STDERR_CAPTURE'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

        self.useFixture(log_fixture.get_logging_handle_error_fixture())
        self.useFixture(cinder_fixtures.StandardLogging())

        rpc.add_extra_exmods("cinder.tests.unit")
        self.addCleanup(rpc.clear_extra_exmods)
        self.addCleanup(rpc.cleanup)

        # Use the in-memory 'fake' messaging transport for all RPC.
        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = 'fake'
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)
        rpc.init(CONF)

        # NOTE(geguileo): This is required because _determine_obj_version_cap
        # and _determine_rpc_version_cap functions in cinder.rpc.RPCAPI cache
        # versions in LAST_RPC_VERSIONS and LAST_OBJ_VERSIONS so we may have
        # weird interactions between tests if we don't clear them before each
        # test.
        rpc.LAST_OBJ_VERSIONS = {}
        rpc.LAST_RPC_VERSIONS = {}

        conf_fixture.set_defaults(CONF)
        CONF([], default_config_files=[])

        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()

        # In-memory sqlite keeps the DB layer fast and isolated.
        CONF.set_default('connection', 'sqlite://', 'database')
        CONF.set_default('sqlite_synchronous', False, 'database')

        # The Database fixture is cached at module level so the schema is
        # only created once per test run.
        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(sqla_api, migration,
                                 sql_connection=CONF.database.connection)
        self.useFixture(_DB_CACHE)

        # NOTE(danms): Make sure to reset us back to non-remote objects
        # for each test to avoid interactions. Also, backup the object
        # registry.
        objects_base.CinderObject.indirection_api = None
        self._base_test_obj_backup = copy.copy(
            objects_base.CinderObjectRegistry._registry._obj_classes)
        self.addCleanup(self._restore_obj_registry)

        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        mox_fixture = self.useFixture(moxstubout.MoxStubout())
        self.mox = mox_fixture.mox
        self.stubs = mox_fixture.stubs
        self.addCleanup(CONF.reset)
        self.addCleanup(self._common_cleanup)
        self.injected = []
        self._services = []

        fake_notifier.mock_notifier(self)

        self.override_config('fatal_exception_format_errors', True)
        # This will be cleaned up by the NestedTempfile fixture
        lock_path = self.useFixture(fixtures.TempDir()).path
        self.fixture = self.useFixture(
            config_fixture.Config(lockutils.CONF))
        self.fixture.config(lock_path=lock_path,
                            group='oslo_concurrency')
        lockutils.set_defaults(lock_path)
        # Point oslo.policy at the in-tree test policy file.
        self.override_config('policy_file',
                             os.path.join(
                                 os.path.abspath(
                                     os.path.join(
                                         os.path.dirname(__file__),
                                         '..',
                                     )
                                 ),
                                 'cinder/tests/unit/policy.json'),
                             group='oslo_policy')

        self._disable_osprofiler()
        self._disallow_invalid_uuids()

        # NOTE(geguileo): This is required because common get_by_id method in
        # cinder.db.sqlalchemy.api caches get methods and if we use a mocked
        # get method in one test it would carry on to the next test.  So we
        # clear out the cache.
        sqla_api._GET_METHODS = {}

        # File-based coordination backend under the test lock dir.
        self.override_config('backend_url', 'file://' + lock_path,
                             group='coordination')
        coordination.COORDINATOR.start()
        self.addCleanup(coordination.COORDINATOR.stop)
 def setUp(self):
     """Prepare service config and a neutron client for the LBaaS v2 tests."""
     super(TestNeutronClientLBaaSV2, self).setUp()
     service_conf = service.prepare_service([], [])
     conf_fix = self.useFixture(fixture_config.Config(service_conf))
     self.CONF = conf_fix.conf
     self.nc = neutron_client.Client(self.CONF)
Example #6
0
 def setUp(self):
     """Point the health manager at the test controller address and key."""
     super(TestHealthSender, self).setUp()
     self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
     self.conf.config(group="health_manager",
                      controller_ip_port_list=IP_PORT,
                      heartbeat_key=KEY)
Example #7
0
 def _set_config(self):
     """Install a config fixture and select the RPC keyspace for tests."""
     conf_fix = config_fixture.Config(cfg.CONF)
     self.cfg_fixture = self.useFixture(conf_fix)
     self.cfg_fixture.config(group='messaging_server',
                             keyspace='conductor_rpc')
Example #8
0
 def setUp(self):
     """Install a default config fixture for the SSL middleware tests."""
     super(SSLMiddlewareTest, self).setUp()
     conf_fix = config.Config()
     self.useFixture(conf_fix)
Example #9
0
 def setUp(self):
     """Override host and oslo_messaging topic for the tests."""
     super().setUp()
     conf_fix = self.useFixture(oslo_fixture.Config(cfg.CONF))
     conf_fix.config(group="oslo_messaging", topic='foo_topic')
     conf_fix.config(host='test-hostname')
     self.conf = conf_fix.conf
 def setUp(self):
     """Load service config and build the meter notification handler."""
     super(TestMeterProcessing, self).setUp()
     conf_fix = self.useFixture(fixture_config.Config())
     self.CONF = conf_fix.conf
     ceilometer_service.prepare_service(argv=[], config_files=[])
     self.handler = notifications.ProcessMeterNotifications(mock.Mock())
Example #11
0
 def _set_config(self):
     """Install a config fixture; reset the global CONF on teardown."""
     conf_fix = fixture.Config()
     self.cfg_fixture = self.useFixture(conf_fix)
     # Registered after the fixture so CONF.reset runs before its cleanup.
     self.addCleanup(cfg.CONF.reset)
Example #12
0
 def setUp(self):
     """Reload the utils module fresh and register hardware-thunder opts."""
     super(TestUtils, self).setUp()
     # The 'imp' module is deprecated and was removed in Python 3.12;
     # importlib.reload is the supported equivalent.
     import importlib
     importlib.reload(utils)
     self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
     self.conf.register_opts(config_options.A10_HARDWARE_THUNDER_OPTS,
                             group=a10constants.HARDWARE_THUNDER_CONF_SECTION)
Example #13
0
 def setUp(self):
     """Provide the snakeoil CA config and a scratch temp directory."""
     super(BaseTestCase, self).setUp()
     ca_conf_fix = oslo_fixture.Config(conf=snakeoil_ca.CONF)
     self.conf = self.useFixture(ca_conf_fix).conf
     self.tmp_dir = self.useFixture(fixtures.TempDir()).path
Example #14
0
def mock_config(test):
    """Install an oslo config fixture on *test* and return it."""
    conf_fix = oo_cfg.Config()
    return test.useFixture(conf_fix)
Example #15
0
 def setUp(self):
     """Create an isolated ConfigOpts plus a fixture managing it."""
     super(TestLoading, self).setUp()
     isolated_conf = cfg.ConfigOpts()
     self.conf = isolated_conf
     self.conf_fixture = self.useFixture(fixture.Config(isolated_conf))
Example #16
0
 def setUp(self):
     """Capture coordination logs and provide shared in-memory storage."""
     super(TestPartitioning, self).setUp()
     self.CONF = self.useFixture(fixture_config.Config()).conf
     handler = MockLoggingHandler()
     self.str_handler = handler
     coordination.LOG.logger.addHandler(handler)
     self.shared_storage = {}
Example #17
0
    def setUp(self):
        """Set up a full driver-agent test environment.

        Creates per-test unix sockets and a sqlite file database, starts
        the status/stats/get listener processes, stores a live certificate,
        and populates the database with a complete load balancer tree
        (pool, members, health monitor, listener, L7 policy/rules) plus the
        matching provider dicts used for assertions.
        """
        # Unique socket/db paths per test run to avoid collisions.
        status_socket_file = '/tmp/octavia-{}.status.sock'.format(
            uuidutils.generate_uuid())
        stats_socket_file = '/tmp/octavia-{}.stats.sock'.format(
            uuidutils.generate_uuid())
        get_socket_file = '/tmp/octavia-{}.get.sock'.format(
            uuidutils.generate_uuid())
        sqlite_db_file = '/tmp/octavia-{}.sqlite.db'.format(
            uuidutils.generate_uuid())
        sqlite_db_connection = 'sqlite:///{}'.format(sqlite_db_file)

        # Note that because the driver agent is a multi-process
        # agent we must use a sqlite file rather than an
        # in-memory instance.
        super().setUp(connection_string=sqlite_db_connection)

        conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF))
        conf.config(group="driver_agent",
                    status_socket_path=status_socket_file)
        conf.config(group="driver_agent", stats_socket_path=stats_socket_file)
        conf.config(group="driver_agent", status_request_timeout=1)
        conf.config(group="driver_agent", get_socket_path=get_socket_file)
        conf.config(group="certificates", cert_manager='local_cert_manager')
        conf.config(group="certificates", storage_path='/tmp')

        # Set up the certificate
        cert_manager = stevedore_driver.DriverManager(
            namespace='octavia.cert_manager',
            name=CONF.certificates.cert_manager,
            invoke_on_load=True,
        ).driver
        self.cert_ref = cert_manager.store_cert(
            None,
            sample_certs.X509_CERT,
            sample_certs.X509_CERT_KEY_ENCRYPTED,
            sample_certs.X509_IMDS,
            private_key_passphrase=sample_certs.X509_CERT_KEY_PASSPHRASE)
        self.addCleanup(cert_manager.delete_cert, None, self.cert_ref)

        # Shared event used to signal all listener processes to exit.
        self.exit_event = multiprocessing.Event()

        self.status_listener_proc = multiprocessing.Process(
            name='status_listener',
            target=driver_listener.status_listener,
            args=(self.exit_event, ))
        # TODO(johnsom) Remove once https://bugs.python.org/issue6721
        #               is resolved.
        self.status_listener_proc.daemon = True

        self.status_listener_proc.start()

        self.stats_listener_proc = multiprocessing.Process(
            name='stats_listener',
            target=driver_listener.stats_listener,
            args=(self.exit_event, ))
        # TODO(johnsom) Remove once https://bugs.python.org/issue6721
        #               is resolved.
        self.stats_listener_proc.daemon = True

        self.stats_listener_proc.start()

        self.get_listener_proc = multiprocessing.Process(
            name='get_listener',
            target=driver_listener.get_listener,
            args=(self.exit_event, ))
        # TODO(johnsom) Remove once https://bugs.python.org/issue6721
        #               is resolved.
        self.get_listener_proc.daemon = True

        self.get_listener_proc.start()

        self.addCleanup(self._process_cleanup)

        self.driver_lib = octavia_driver_lib.DriverLibrary(
            status_socket=status_socket_file,
            stats_socket=stats_socket_file,
            get_socket=get_socket_file)

        self.sample_data = sample_data_models.SampleDriverDataModels()
        self.repos = repositories.Repositories()

        # Create the full load balancer in the database
        self.tls_container_dict = {
            lib_consts.CERTIFICATE:
            sample_certs.X509_CERT.decode('utf-8'),
            lib_consts.ID:
            sample_certs.X509_CERT_SHA1,
            lib_consts.INTERMEDIATES:
            [i.decode('utf-8') for i in sample_certs.X509_IMDS_LIST],
            lib_consts.PASSPHRASE:
            None,
            lib_consts.PRIMARY_CN:
            sample_certs.X509_CERT_CN,
            lib_consts.PRIVATE_KEY:
            sample_certs.X509_CERT_KEY.decode('utf-8')
        }

        # ### Create load balancer
        self.repos.flavor_profile.create(self.session,
                                         id=self.sample_data.flavor_profile_id,
                                         provider_name=constants.AMPHORA,
                                         flavor_data='{"something": "else"}')
        self.repos.flavor.create(
            self.session,
            id=self.sample_data.flavor_id,
            enabled=True,
            flavor_profile_id=self.sample_data.flavor_profile_id)
        self.repos.create_load_balancer_and_vip(
            self.session, self.sample_data.test_loadbalancer1_dict,
            self.sample_data.test_vip_dict)

        # ### Create Pool
        pool_dict = copy.deepcopy(self.sample_data.test_pool1_dict)

        pool_dict[constants.LOAD_BALANCER_ID] = self.sample_data.lb_id

        # Use a live certificate
        pool_dict[constants.TLS_CERTIFICATE_ID] = self.cert_ref
        pool_dict[constants.CA_TLS_CERTIFICATE_ID] = self.cert_ref
        pool_dict[constants.CRL_CONTAINER_ID] = self.cert_ref

        # Remove items that are linked in the DB
        del pool_dict[lib_consts.MEMBERS]
        del pool_dict[constants.HEALTH_MONITOR]
        del pool_dict[lib_consts.SESSION_PERSISTENCE]
        del pool_dict[lib_consts.LISTENERS]
        del pool_dict[lib_consts.L7POLICIES]

        self.repos.pool.create(self.session, **pool_dict)

        self.repos.session_persistence.create(
            self.session,
            pool_id=self.sample_data.pool1_id,
            type=lib_consts.SESSION_PERSISTENCE_SOURCE_IP)

        self.provider_pool_dict = copy.deepcopy(
            self.sample_data.provider_pool1_dict)
        self.provider_pool_dict[
            constants.LISTENER_ID] = self.sample_data.listener1_id

        # Fix for render_unsets = True
        self.provider_pool_dict[lib_consts.SESSION_PERSISTENCE][
            lib_consts.COOKIE_NAME] = None
        self.provider_pool_dict[lib_consts.SESSION_PERSISTENCE][
            lib_consts.PERSISTENCE_GRANULARITY] = None
        self.provider_pool_dict[lib_consts.SESSION_PERSISTENCE][
            lib_consts.PERSISTENCE_TIMEOUT] = None

        # Use a live certificate
        self.provider_pool_dict[
            lib_consts.TLS_CONTAINER_DATA] = self.tls_container_dict
        self.provider_pool_dict[lib_consts.TLS_CONTAINER_REF] = self.cert_ref
        self.provider_pool_dict[lib_consts.CA_TLS_CONTAINER_DATA] = (
            sample_certs.X509_CERT.decode('utf-8'))
        self.provider_pool_dict[
            lib_consts.CA_TLS_CONTAINER_REF] = self.cert_ref
        self.provider_pool_dict[lib_consts.CRL_CONTAINER_DATA] = (
            sample_certs.X509_CERT.decode('utf-8'))
        self.provider_pool_dict[lib_consts.CRL_CONTAINER_REF] = self.cert_ref

        # ### Create Member
        member_dict = copy.deepcopy(self.sample_data.test_member1_dict)
        self.repos.member.create(self.session, **member_dict)
        self.provider_pool_dict[lib_consts.MEMBERS] = [
            self.sample_data.provider_member1_dict
        ]

        # ### Create Health Monitor
        hm_dict = copy.deepcopy(self.sample_data.test_hm1_dict)
        self.repos.health_monitor.create(self.session, **hm_dict)
        self.provider_pool_dict[
            lib_consts.HEALTHMONITOR] = self.sample_data.provider_hm1_dict

        # ### Create Listener
        listener_dict = copy.deepcopy(self.sample_data.test_listener1_dict)
        listener_dict[lib_consts.DEFAULT_POOL_ID] = self.sample_data.pool1_id

        # Remove items that are linked in the DB
        del listener_dict[lib_consts.L7POLICIES]
        del listener_dict[lib_consts.DEFAULT_POOL]
        del listener_dict[constants.SNI_CONTAINERS]

        # Use a live certificate
        listener_dict[constants.TLS_CERTIFICATE_ID] = self.cert_ref
        listener_dict[constants.CLIENT_CA_TLS_CERTIFICATE_ID] = self.cert_ref
        listener_dict[constants.CLIENT_CRL_CONTAINER_ID] = self.cert_ref

        self.repos.listener.create(self.session, **listener_dict)
        self.repos.sni.create(self.session,
                              listener_id=self.sample_data.listener1_id,
                              tls_container_id=self.cert_ref,
                              position=1)

        # Add our live certs in that differ from the fake certs in sample_data
        self.provider_listener_dict = copy.deepcopy(
            self.sample_data.provider_listener1_dict)
        self.provider_listener_dict[
            lib_consts.DEFAULT_TLS_CONTAINER_REF] = self.cert_ref
        self.provider_listener_dict[
            lib_consts.DEFAULT_TLS_CONTAINER_DATA] = self.tls_container_dict
        self.provider_listener_dict[
            lib_consts.CLIENT_CA_TLS_CONTAINER_REF] = self.cert_ref
        self.provider_listener_dict[
            lib_consts.CLIENT_CA_TLS_CONTAINER_DATA] = (
                sample_certs.X509_CERT.decode('utf-8'))
        self.provider_listener_dict[
            lib_consts.CLIENT_CRL_CONTAINER_REF] = self.cert_ref
        self.provider_listener_dict[lib_consts.CLIENT_CRL_CONTAINER_DATA] = (
            sample_certs.X509_CERT.decode('utf-8'))
        self.provider_listener_dict[lib_consts.SNI_CONTAINER_DATA] = [
            self.tls_container_dict
        ]
        self.provider_listener_dict[lib_consts.SNI_CONTAINER_REFS] = [
            self.cert_ref
        ]

        self.provider_listener_dict[
            lib_consts.DEFAULT_POOL] = self.provider_pool_dict
        self.provider_listener_dict[
            lib_consts.DEFAULT_POOL_ID] = self.sample_data.pool1_id

        self.provider_listener_dict[lib_consts.L7POLICIES] = [
            self.sample_data.provider_l7policy1_dict
        ]

        # ### Create L7 Policy
        l7policy_dict = copy.deepcopy(self.sample_data.test_l7policy1_dict)
        del l7policy_dict[lib_consts.L7RULES]
        self.repos.l7policy.create(self.session, **l7policy_dict)

        # ### Create L7 Rules
        l7rule_dict = copy.deepcopy(self.sample_data.test_l7rule1_dict)
        self.repos.l7rule.create(self.session, **l7rule_dict)
        l7rule2_dict = copy.deepcopy(self.sample_data.test_l7rule2_dict)
        self.repos.l7rule.create(self.session, **l7rule2_dict)

        # Expected provider-side view of the whole load balancer tree.
        self.provider_lb_dict = copy.deepcopy(
            self.sample_data.provider_loadbalancer_tree_dict)
        self.provider_lb_dict[lib_consts.POOLS] = [self.provider_pool_dict]
        self.provider_lb_dict[lib_consts.LISTENERS] = [
            self.provider_listener_dict
        ]
Example #18
0
 def setUp(self):
     """Install the global config fixture for the validation tests."""
     super(TestValidations, self).setUp()
     # Register the fixture via useFixture so overrides applied through
     # self.conf are reverted on teardown; the original created the
     # fixture without ever setting it up, unlike the sibling test cases.
     self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
Example #19
0
 def setUp(self):
     """Install a config fixture and clear notification subscribers after."""
     super(AuditNotificationsTestCase, self).setUp()
     conf_fix = config_fixture.Config(CONF)
     self.config_fixture = self.useFixture(conf_fix)
     self.addCleanup(notifications.clear_subscribers)
Example #20
0
 def setUp(self):
     """Prepare an admin context, empty target and config fixture."""
     super(PolicyFileTestCase, self).setUp()
     admin_ctxt = context.get_admin_context()
     self.context = admin_ctxt
     self.target = {}
     conf_fix = config_fixture.Config(CONF)
     self.fixture = self.useFixture(conf_fix)
     self.addCleanup(policy.reset)
Example #21
0
    def setUp(self, mock_rpc_server):
        """Install the config fixture and build the mdns service.

        ``mock_rpc_server`` is unused here; presumably it is injected by a
        mock.patch decorator on the test class — confirm against the caller.
        """
        super(MdnsServiceTest, self).setUp()
        conf_fix = cfg_fixture.Config(CONF)
        self.useFixture(conf_fix)

        self.service = service.Service()
Example #22
0
 def setUp(self):
     """Setup for Identity Filter Test Cases.

     The config fixture is installed before the parent setUp runs (order
     preserved from the original; presumably so overrides are already in
     place while the base class initializes).
     """
     conf_fix = config_fixture.Config(CONF)
     self.config_fixture = self.useFixture(conf_fix)
     super(IdentityPasswordExpiryFilteredTestCase, self).setUp()
Example #23
0
    def setUp(self):
        """Run before each test method to initialize test environment.

        NOTE(review): this block contains literal ``<project_name>``
        placeholders (in import paths, patch targets and the policy-file
        path) — it is a template, not runnable Python, until those are
        substituted with a real project name.
        """
        super(TestCase, self).setUp()

        # Create default notifier
        self.notifier = fake_notifier.get_fake_notifier()

        # Mock rpc get notifier with fake notifier method that joins all
        # notifications with the default notifier
        p = mock.patch('<project_name>.rpc.get_notifier',
                       side_effect=self._get_joined_notifier)
        p.start()

        # Import <project_name> objects for test cases
        objects.register_all()

        # Unit tests do not need to use lazy gettext
        i18n.enable_lazy(False)

        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        # Isolate temp files and HOME from the real environment.
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        # Optionally capture stdout/stderr, controlled by env vars.
        environ_enabled = (lambda var_name:
                           strutils.bool_from_string(os.environ.get(var_name)))
        if environ_enabled('OS_STDOUT_CAPTURE'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if environ_enabled('OS_STDERR_CAPTURE'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

        self.useFixture(log_fixture.get_logging_handle_error_fixture())
        self.useFixture(<project_name>_fixtures.StandardLogging())

        rpc.add_extra_exmods("<project_name>.tests.unit")
        self.addCleanup(rpc.clear_extra_exmods)
        self.addCleanup(rpc.cleanup)

        # Use the in-memory 'fake' messaging transport for all RPC.
        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = 'fake'
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)
        rpc.init(CONF)

        conf_fixture.set_defaults(CONF)
        CONF([], default_config_files=[])

        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()

        CONF.set_default('connection', 'sqlite://', 'database')
        CONF.set_default('sqlite_synchronous', False, 'database')

        # The Database fixture is cached at module level so the schema is
        # only created once per test run.
        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(sqla_api, migration,
                                 sql_connection=CONF.database.connection,
                                 sqlite_db=CONF.database.sqlite_db,
                                 sqlite_clean_db=CONF.sqlite_clean_db)
        self.useFixture(_DB_CACHE)

        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        mox_fixture = self.useFixture(moxstubout.MoxStubout())
        self.mox = mox_fixture.mox
        self.stubs = mox_fixture.stubs
        self.addCleanup(CONF.reset)
        self.addCleanup(self._common_cleanup)
        self.injected = []
        self._services = []

        fake_notifier.stub_notifier(self.stubs)

        self.override_config('fatal_exception_format_errors', True)
        # This will be cleaned up by the NestedTempfile fixture
        lock_path = self.useFixture(fixtures.TempDir()).path
        self.fixture = self.useFixture(
            config_fixture.Config(lockutils.CONF))
        self.fixture.config(lock_path=lock_path,
                            group='oslo_concurrency')
        lockutils.set_defaults(lock_path)
        # Point oslo.policy at the in-tree test policy file.
        self.override_config('policy_file',
                             os.path.join(
                                 os.path.abspath(
                                     os.path.join(
                                         os.path.dirname(__file__),
                                         '..',
                                     )
                                 ),
                                 '<project_name>/tests/unit/policy.json'),
                             group='oslo_policy')

        self._disable_osprofiler()
Example #24
0
    def setUp(self):
        """Create a Designate context and expose the fixture-managed conf."""
        super(SchedulerTest, self).setUp()

        self.context = context.DesignateContext()
        conf_fix = self.useFixture(cfg_fixture.Config(cfg.CONF))
        self.CONF = conf_fix.conf
Example #25
0
 def setUp(self):
     """Load config from *policy_path* and set the admin-check policy defaults."""
     super(RequestContextMiddlewareTest, self).setUp()
     conf_fix = config_fixture.Config()
     self.fixture = self.useFixture(conf_fix)
     self.fixture.conf(args=['--config-dir', policy_path])
     policy_opts.set_defaults(cfg.CONF, 'check_admin.json')
Example #26
0
 def setUp(self):
     """Give each test its own ConfigOpts and matching fixture."""
     super(URISourceTestCase, self).setUp()
     private_conf = cfg.ConfigOpts()
     self.conf = private_conf
     self.conf_fixture = self.useFixture(fixture.Config(private_conf))
Example #27
0
    def setUp(self):
        """Point the cassandra cluster address at localhost for the tests."""
        super(TestRepoMetricsCassandra, self).setUp()

        conf_fix = fixture_config.Config(cfg.CONF)
        self._fixture_config = self.useFixture(conf_fix)
        self._fixture_config.config(group='cassandra',
                                    cluster_ip_addresses='127.0.0.1')
Example #28
0
 def setUp(self):
     """Give each test its own ConfigOpts and matching fixture."""
     super(TestProcessingSources, self).setUp()
     private_conf = cfg.ConfigOpts()
     self.conf = private_conf
     self.conf_fixture = self.useFixture(fixture.Config(private_conf))
Example #29
0
    def setUp(self):
        """Run before each test method to initialize test environment.

        Installs the common nova test fixtures (timeout, temp dirs,
        output/log capture, locking, config, RPC, database, mox, policy)
        and resets module-level caches between tests.
        """
        super(TestCase, self).setUp()
        # Abort tests that exceed OS_TEST_TIMEOUT, scaled per test class.
        self.useFixture(
            nova_fixtures.Timeout(os.environ.get('OS_TEST_TIMEOUT', 0),
                                  self.TIMEOUT_SCALING_FACTOR))

        # Isolate temp files and HOME from the real environment.
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())
        self.useFixture(log_fixture.get_logging_handle_error_fixture())

        self.useFixture(nova_fixtures.OutputStreamCapture())

        self.useFixture(nova_fixtures.StandardLogging())

        # NOTE(sdague): because of the way we were using the lock
        # wrapper we ended up with a lot of tests that started
        # relying on global external locking being set up for them. We
        # consider all of these to be *bugs*. Tests should not require
        # global external locking, or if they do, they should
        # explicitly set it up themselves.
        #
        # The following REQUIRES_LOCKING class parameter is provided
        # as a bridge to get us there. No new tests should be added
        # that require it, and existing classes and tests should be
        # fixed to not need it.
        if self.REQUIRES_LOCKING:
            lock_path = self.useFixture(fixtures.TempDir()).path
            self.fixture = self.useFixture(
                config_fixture.Config(lockutils.CONF))
            self.fixture.config(lock_path=lock_path, group='oslo_concurrency')

        self.useFixture(conf_fixture.ConfFixture(CONF))
        self.useFixture(nova_fixtures.RPCFixture('nova.test'))

        if self.USES_DB:
            self.useFixture(nova_fixtures.Database())

        # NOTE(blk-u): WarningsFixture must be after the Database fixture
        # because sqlalchemy-migrate messes with the warnings filters.
        self.useFixture(nova_fixtures.WarningsFixture())

        # NOTE(danms): Make sure to reset us back to non-remote objects
        # for each test to avoid interactions. Also, backup the object
        # registry.
        objects_base.NovaObject.indirection_api = None
        self._base_test_obj_backup = copy.copy(
            objects_base.NovaObjectRegistry._registry._obj_classes)
        self.addCleanup(self._restore_obj_registry)

        # NOTE(mnaser): All calls to utils.is_neutron() are cached in
        # nova.utils._IS_NEUTRON.  We set it to None to avoid any
        # caching of that value.
        utils._IS_NEUTRON = None

        mox_fixture = self.useFixture(moxstubout.MoxStubout())
        self.mox = mox_fixture.mox
        self.stubs = mox_fixture.stubs
        self.addCleanup(self._clear_attrs)
        # Ensure a stray http_proxy env var cannot affect test HTTP traffic.
        self.useFixture(fixtures.EnvironmentVariable('http_proxy'))
        self.policy = self.useFixture(policy_fixture.PolicyFixture())

        self.useFixture(nova_fixtures.PoisonFunctions())
Example #30
0
 def setUp(self):
     """Configure the core plugin and prepare the alembic migration config."""
     super(_TestModelsMigrations, self).setUp()
     self.cfg = self.useFixture(config_fixture.Config())
     self.cfg.config(core_plugin=CORE_PLUGIN)
     alembic_cfg = migration.get_neutron_config()
     alembic_cfg.neutron_config = cfg.CONF
     self.alembic_config = alembic_cfg