def initialize_db(self):
    db = CONF.image_cache_sqlite_db
    self.db_path = os.path.join(self.base_dir, db)
    lockutils.set_defaults(self.base_dir)

    @lockutils.synchronized('image_cache_db_init', external=True)
    def create_db():
        try:
            conn = sqlite3.connect(self.db_path, check_same_thread=False,
                                   factory=SqliteConnection)
            conn.executescript("""
                CREATE TABLE IF NOT EXISTS cached_images (
                    image_id TEXT PRIMARY KEY,
                    last_accessed REAL DEFAULT 0.0,
                    last_modified REAL DEFAULT 0.0,
                    size INTEGER DEFAULT 0,
                    hits INTEGER DEFAULT 0,
                    checksum TEXT
                );
            """)
            conn.close()
        except sqlite3.DatabaseError as e:
            msg = _("Failed to initialize the image cache database. "
                    "Got error: %s") % e
            LOG.error(msg)
            raise exception.BadDriverConfiguration(driver_name='sqlite',
                                                   reason=msg)

    create_db()
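# A minimal sketch (not from the snippet above) of why set_defaults() comes
# first: with lock_path defaulted to a writable directory, external=True
# locks are backed by files in that directory, so the guarded section is
# serialized across processes as well as threads. The directory and the
# function name below are assumptions for illustration.
from oslo_concurrency import lockutils

lockutils.set_defaults('/var/cache/app')  # assumed writable directory


@lockutils.synchronized('image_cache_db_init', external=True)
def create_db_once():
    pass  # only one process at a time executes this body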
def setUp(self):
    super(ConfigFixture, self).setUp()
    self.conf.set_default('build_interval', 10, group='compute')
    self.conf.set_default('build_timeout', 10, group='compute')
    self.conf.set_default('image_ref', 'fake_image_id', group='compute')
    self.conf.set_default('disable_ssl_certificate_validation', True,
                          group='identity')
    self.conf.set_default('uri', 'http://fake_uri.com/auth',
                          group='identity')
    self.conf.set_default('uri_v3', 'http://fake_uri_v3.com/auth',
                          group='identity')
    self.conf.set_default('neutron', True, group='service_available')
    lock_path = str(os.environ.get('OS_TEST_LOCK_PATH',
                                   os.environ.get('TMPDIR', '/tmp')))
    if not os.path.exists(lock_path):
        os.mkdir(lock_path)
    lockutils.set_defaults(
        lock_path=lock_path,
    )
    self.conf.set_default('auth_version', 'v2', group='identity')
    for config_option in ['username', 'password', 'project_name']:
        # Identity group items
        self.conf.set_default('admin_' + config_option,
                              'fake_' + config_option,
                              group='auth')
def setUp(self):
    super(ConfigFixture, self).setUp()
    self.conf.set_default('build_interval', 10, group='compute')
    self.conf.set_default('build_timeout', 10, group='compute')
    self.conf.set_default('disable_ssl_certificate_validation', True,
                          group='identity')
    self.conf.set_default('uri', 'http://fake_uri.com/auth',
                          group='identity')
    self.conf.set_default('uri_v3', 'http://fake_uri_v3.com/auth',
                          group='identity')
    self.conf.set_default('neutron', True, group='service_available')
    self.conf.set_default('heat', True, group='service_available')
    if not os.path.exists(str(os.environ.get('OS_TEST_LOCK_PATH'))):
        os.mkdir(str(os.environ.get('OS_TEST_LOCK_PATH')))
    lockutils.set_defaults(
        lock_path=str(os.environ.get('OS_TEST_LOCK_PATH')),
    )
    self.conf.set_default('auth_version', 'v2', group='identity')
    for config_option in ['username', 'password', 'project_name']:
        # Identity group items
        for prefix in ['', 'alt_', 'admin_']:
            if prefix == 'admin_':
                group = 'auth'
            else:
                group = 'identity'
            self.conf.set_default(prefix + config_option,
                                  'fake_' + config_option,
                                  group=group)
def __init__(self, cluster_id, vmax_common=None, vmax_hosts=None,
             compute_instance=None, lock_path='/tmp'):
    self.cluster_id = cluster_id
    self.name_prefix = u'FLOCKER-'
    self.vmax_common = vmax_common
    self.min_allocation = self.vmax_round_allocation(0)
    self.vmax_hosts = vmax_hosts
    self.compute_instance = compute_instance
    if self.compute_instance is None:
        self.compute_instance = platform.node()

    if hasattr(pywbem.cim_operations, 'wbem_request'):
        pywbem.cim_http.wbem_request = wbem_request
        pywbem.cim_operations.wbem_request = wbem_request

    self.volume_stats = {}
    self.default_pool = {}
    for profile in self.vmax_common.keys():
        self.vmax_common[profile]._initial_setup = self._initial_setup
        self._gather_info(profile)
        self.volume_stats[profile] = \
            self.vmax_common[profile].update_volume_stats()
        self.default_pool[profile] = None \
            if 'pools' not in self.volume_stats[profile] \
            else self.volume_stats[profile]['pools'][0]

    self.lock_path = lock_path
    lockutils.set_defaults(lock_path)
def setUp(self):
    super(MySQLConfFixture, self).setUp()
    self.register_opts(options.database_opts, group='database')
    self.url = db_test_utils.get_connect_string("mysql")
    self.set_default('connection', self.url, group='database')
    lockutils.set_defaults(lock_path='/tmp')
    self._drop_db()
def setUp(self):
    super(ConfigFixture, self).setUp()
    self.conf.set_default('build_interval', 10, group='compute')
    self.conf.set_default('build_timeout', 10, group='compute')
    self.conf.set_default('disable_ssl_certificate_validation', True,
                          group='identity')
    self.conf.set_default('uri', 'http://fake_uri.com/auth',
                          group='identity')
    self.conf.set_default('uri_v3', 'http://fake_uri_v3.com/auth',
                          group='identity')
    self.conf.set_default('neutron', True, group='service_available')
    lock_path = str(os.environ.get('OS_TEST_LOCK_PATH',
                                   os.environ.get('TMPDIR', '/tmp')))
    if not os.path.exists(lock_path):
        os.mkdir(lock_path)
    lockutils.set_defaults(lock_path=lock_path)
    self.conf.set_default('auth_version', 'v2', group='identity')
    for config_option in ['username', 'password', 'project_name']:
        # Identity group items
        self.conf.set_default('admin_' + config_option,
                              'fake_' + config_option,
                              group='auth')
def setUp(self):
    super(ConfigFixture, self).setUp()
    self.conf.set_default('build_interval', 10, group='compute')
    self.conf.set_default('build_timeout', 10, group='compute')
    self.conf.set_default('disable_ssl_certificate_validation', True,
                          group='identity')
    self.conf.set_default('uri', 'http://fake_uri.com/auth',
                          group='identity')
    self.conf.set_default('uri_v3', 'http://fake_uri_v3.com/auth',
                          group='identity')
    self.conf.set_default('neutron', True, group='service_available')
    self.conf.set_default('heat', True, group='service_available')
    if not os.path.exists(str(os.environ.get('OS_TEST_LOCK_PATH'))):
        os.mkdir(str(os.environ.get('OS_TEST_LOCK_PATH')))
    lockutils.set_defaults(
        lock_path=str(os.environ.get('OS_TEST_LOCK_PATH')),
    )
    self.conf.set_default('auth_version', 'v2', group='identity')
    for config_option in ['username', 'password', 'tenant_name']:
        # Identity group items
        for prefix in ['', 'alt_', 'admin_']:
            if prefix == 'admin_':
                group = 'auth'
            else:
                group = 'identity'
            self.conf.set_default(prefix + config_option,
                                  'fake_' + config_option,
                                  group=group)
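# The tempest-style fixtures above repeat the same lock-path discovery
# inline. A small helper, sketched here with an illustrative name (not from
# the source), keeps the precedence (OS_TEST_LOCK_PATH, then TMPDIR, then
# /tmp) in one place:
import os

from oslo_concurrency import lockutils


def ensure_test_lock_path():
    lock_path = str(os.environ.get('OS_TEST_LOCK_PATH',
                                   os.environ.get('TMPDIR', '/tmp')))
    if not os.path.exists(lock_path):
        os.mkdir(lock_path)
    lockutils.set_defaults(lock_path=lock_path)
    return lock_path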
def setUp(self):
    super(IsolatedUnitTest, self).setUp()
    options.set_defaults(CONF, connection='sqlite://',
                         sqlite_db='aflo.sqlite')
    lockutils.set_defaults(os.path.join(self.test_dir))
    self.config(debug=False)
def setUp(self):
    super(IsolatedUnitTest, self).setUp()
    options.set_defaults(CONF, connection='sqlite://')
    lockutils.set_defaults(os.path.join(self.test_dir))
    self.config(debug=False)
    self.config(default_store='filesystem',
                filesystem_store_datadir=self.test_dir,
                group="glance_store")
    store.create_stores()

    def fake_get_connection_type(client):
        DEFAULT_REGISTRY_PORT = 9191
        DEFAULT_API_PORT = 9292
        if client.port == DEFAULT_API_PORT:
            return stubs.FakeGlanceConnection
        elif client.port == DEFAULT_REGISTRY_PORT:
            return stubs.FakeRegistryConnection(registry=self.registry)

    self.patcher = mock.patch(
        'glance.common.client.BaseClient.get_connection_type',
        fake_get_connection_type)
    self.addCleanup(self.patcher.stop)
    self.patcher.start()
def __init__(self, siteId=None, tokenPrefix=None, **kwargs):
    """Base class supporting application token management in persistent store.

    kwargs allows for overriding ConfigInfo for testing
    """
    #
    self._cI = ConfigInfo(siteId)
    #
    if tokenPrefix is not None:
        fn = tokenPrefix + "_TOKEN_STORE.pic"
    else:
        fn = "ANONYMOUSWS_TOKEN_STORE.pic"
    #
    self.__filePath = kwargs.get("site_service_registration_dir_path")
    if not self.__filePath:
        self.__filePath = os.path.join(
            self._cI.get("SITE_SERVICE_REGISTRATION_DIR_PATH"), fn)
    #
    logger.debug("Assigning token store file path %r", self.__filePath)
    self.__lockDirPath = kwargs.get(
        "site_service_registration_lockdir_path")
    if not self.__lockDirPath:
        self.__lockDirPath = self._cI.get(
            "SITE_SERVICE_REGISTRATION_LOCKDIR_PATH", ".")
    #
    self.__tokenD = {}
    self.__emailD = {}
    self.__tokenPrefix = tokenPrefix if tokenPrefix else "WS"
    self.__pickleProtocol = 0
    #
    lockutils.set_defaults(self.__lockDirPath)
    #
    self.deserialize()
def __init__(self, sessionPath, prefix=None):
    self.__filePrefix = prefix if prefix is not None else "general"
    self.__sessionPath = sessionPath
    self.__filePath = None
    #
    lockutils.set_defaults(self.__sessionPath)
    #
    self.__setup()
def parse_args(args=None, usage=None, default_config_files=None):
    if "OSLO_LOCK_PATH" not in os.environ:
        lockutils.set_defaults(tempfile.gettempdir())

    CONF(args=args,
         project='umbrella',
         version=None,
         usage=usage,
         default_config_files=default_config_files)
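# Note on the guard above: oslo.concurrency's lock_path option already
# defaults to os.environ.get("OSLO_LOCK_PATH"), so calling set_defaults()
# unconditionally would clobber a value the operator supplied through the
# environment. The guard only falls back to the system temp dir when the
# variable is unset. A minimal standalone sketch of the same pattern:
import os
import tempfile

from oslo_concurrency import lockutils

if "OSLO_LOCK_PATH" not in os.environ:
    lockutils.set_defaults(tempfile.gettempdir())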
def parse_args(args=None, usage=None, default_config_files=None):
    if "OSLO_LOCK_PATH" not in os.environ:
        lockutils.set_defaults(tempfile.gettempdir())

    CONF(args=args,
         project='rebac',
         version=version.cached_version_string(),
         usage=usage,
         default_config_files=default_config_files)
def setUp(self):
    super(PostgresConfFixture, self).setUp()
    self.register_opts(options.database_opts, group='database')
    self.register_opts(cli.MIGRATION_OPTS)
    self.url = db_test_utils.get_connect_string("postgres")
    self.set_default('connection', self.url, group='database')
    self.set_default('disable_microsecond_data_migration', False)
    lockutils.set_defaults(lock_path='/tmp')
    self._drop_db()
def parse_args(args=None, usage=None, default_config_files=None):
    if "OSLO_LOCK_PATH" not in os.environ:
        lockutils.set_defaults(tempfile.gettempdir())

    CONF(args=args,
         project='searchlight',
         version=version.cached_version_string(),
         usage=usage,
         default_config_files=default_config_files)
def setUp(self):
    super(MultiIsolatedUnitTest, self).setUp()
    options.set_defaults(CONF, connection='sqlite://')
    lockutils.set_defaults(os.path.join(self.test_dir))
    self.config(debug=False)
    stubs.stub_out_registry_and_store_server(self,
                                             self.test_dir,
                                             registry=self.registry)
def setUp(self):
    super(MySQLConfFixture, self).setUp()
    self.register_opt(cfg.IntOpt('max_pool_size', default=20),
                      group='database')
    self.register_opt(cfg.IntOpt('idle_timeout', default=3600),
                      group='database')
    self.register_opt(cfg.StrOpt('connection', default=''),
                      group='database')
    self.url = db_test_utils.get_connect_string("mysql")
    self.set_default('connection', self.url, group='database')
    lockutils.set_defaults(lock_path='/tmp')
    self._drop_db()
def setUp(self):
    super(IsolatedUnitTest, self).setUp()
    options.set_defaults(CONF, connection="sqlite://",
                         sqlite_db="glance.sqlite")
    lockutils.set_defaults(os.path.join(self.test_dir))
    self.config(verbose=False, debug=False)
    self.config(default_store="filesystem",
                filesystem_store_datadir=os.path.join(self.test_dir),
                group="glance_store")
    store.create_stores()
    stubs.stub_out_registry_and_store_server(self.stubs,
                                             self.test_dir,
                                             registry=self.registry)
def setUp(self):
    super(IsolatedUnitTest, self).setUp()
    options.set_defaults(CONF,
                         connection='sqlite:////%s/tests.sqlite' %
                                    self.test_dir)
    lockutils.set_defaults(os.path.join(self.test_dir))
    self.config(debug=False)
    self.config(default_store='filesystem',
                filesystem_store_datadir=self.test_dir,
                group="glance_store")
    store.create_stores()
def setUp(self):
    super(PostgresConfFixture, self).setUp()
    self.register_opt(cfg.StrOpt('connection', default=''),
                      group='database')
    self.register_opt(cfg.IntOpt('max_pool_size', default=20),
                      group='database')
    self.register_opt(cfg.IntOpt('idle_timeout', default=3600),
                      group='database')
    self.register_opts(cli.MIGRATION_OPTS)
    self.url = db_test_utils.get_connect_string("postgres")
    self.set_default('connection', self.url, group='database')
    self.set_default('disable_microsecond_data_migration', False)
    lockutils.set_defaults(lock_path='/tmp')
    self._drop_db()
def setUp(self):
    super(IsolatedUnitTest, self).setUp()
    options.set_defaults(CONF, connection='sqlite://')
    lockutils.set_defaults(os.path.join(self.test_dir))
    self.config(debug=False)
    self.config(default_store='filesystem',
                filesystem_store_datadir=self.test_dir,
                group="glance_store")
    store.create_stores()
    stubs.stub_out_registry_and_store_server(self.stubs,
                                             self.test_dir,
                                             registry=self.registry)
def __init__(self, verbose=False, log=sys.stderr):  # pylint: disable=unused-argument
    self.__verbose = verbose
    self.__debug = True
    self.__cI = ConfigInfo(siteId=None, verbose=self.__verbose)
    self.__cIDepUI = ConfigInfoAppDepUI(siteId=getSiteId())
    # Default data set id range assignments
    self.__depIdAssignments = self.__cI.get(
        "SITE_DATASET_ID_ASSIGNMENT_DICTIONARY")
    self.__depTestIdAssignments = self.__cI.get(
        "SITE_DATASET_TEST_ID_ASSIGNMENT_DICTIONARY")
    self.__siteBackupD = self.__cI.get("SITE_BACKUP_DICT", default={})
    self.__dsLocD = None
    #
    self.__lockDirPath = self.__cI.get(
        "SITE_SERVICE_REGISTRATION_LOCKDIR_PATH", "/tmp")
    lockutils.set_defaults(self.__lockDirPath)
def setUp(self):
    super(SqliteConfFixture, self).setUp()
    self.register_opt(cfg.StrOpt('connection', default=''),
                      group='database')
    self.register_opt(cfg.IntOpt('max_pool_size', default=None),
                      group='database')
    self.register_opt(cfg.IntOpt('idle_timeout', default=None),
                      group='database')
    self.register_opts(cli.MIGRATION_OPTS)
    self.url = db_test_utils.get_connect_string("sqlite")
    self.set_default('connection', self.url, group='database')
    self.set_default('disable_microsecond_data_migration', False)
    lockutils.set_defaults(lock_path='/tmp')
    self._drop_db()
    self.addCleanup(self.cleanup)
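# The db fixtures above hard-code lock_path='/tmp'. The cinder test cases
# later in this collection point lock_path at a per-test temporary directory
# instead, which avoids lock-file collisions between concurrent runs. A
# minimal sketch of that alternative (the class name is illustrative):
import fixtures
from oslo_concurrency import lockutils


class TempLockPathFixture(fixtures.Fixture):
    def _setUp(self):
        # The TempDir is cleaned up automatically with the fixture.
        lock_path = self.useFixture(fixtures.TempDir()).path
        lockutils.set_defaults(lock_path=lock_path)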
def setUp(self):
    super(IsolatedUnitTest, self).setUp()
    options.set_defaults(CONF, connection="sqlite://",
                         sqlite_db="glance.sqlite")
    lockutils.set_defaults(os.path.join(self.test_dir))
    self.config(verbose=False, debug=False)
    self.config(default_store="filesystem",
                filesystem_store_datadir=os.path.join(self.test_dir),
                group="glance_store")
    store.create_stores()
    stubs.stub_out_registry_and_store_server(self.stubs,
                                             self.test_dir,
                                             registry=self.registry)
    # clear context left-over from any previous test executions
    if hasattr(local.store, "context"):
        delattr(local.store, "context")
def __init__(self, cluster_id, vmax_common=None, vmax_hosts=None,
             compute_instance=None, dbhost='localhost', lock_path='/tmp'):
    self.cluster_id = cluster_id
    self.vmax_common = vmax_common
    self.min_allocation = self.vmax_round_allocation(0)
    self.dbconn = emc_flocker_db.EmcFlockerDb(dbhost)
    self.vmax_hosts = vmax_hosts
    self.compute_instance = compute_instance
    if self.compute_instance is None:
        self.compute_instance = platform.node()

    self.volume_stats = self.vmax_common.update_volume_stats()
    if 'pools' in self.volume_stats:
        self.default_pool = self.volume_stats['pools'][0]
    else:
        self.default_pool = None

    self.lock_path = lock_path
    lockutils.set_defaults(lock_path)
def __init__(self, cluster_id, vmax_common=None, vmax_hosts=None,
             compute_instance=None, dbhost='localhost:emc_flocker_hash',
             lock_path='/tmp'):
    self.cluster_id = cluster_id
    self.vmax_common = vmax_common
    self.min_allocation = self.vmax_round_allocation(0)
    self.dbconn = emc_flocker_db.EmcFlockerDb(dbhost.split(':')[0],
                                              key=dbhost.split(':')[1])
    self.vmax_hosts = vmax_hosts
    self.compute_instance = compute_instance
    if self.compute_instance is None:
        self.compute_instance = platform.node()

    self.volume_stats = {}
    self.default_pool = {}
    for profile in self.vmax_common.keys():
        self.vmax_common[profile]._initial_setup = self._initial_setup
        self._gather_info(profile)
        self.volume_stats[profile] = \
            self.vmax_common[profile].update_volume_stats()
        self.default_pool[profile] = None \
            if 'pools' not in self.volume_stats[profile] \
            else self.volume_stats[profile]['pools'][0]

    self.lock_path = lock_path
    lockutils.set_defaults(lock_path)
def setUp(self):
    """Run before each test method to initialize test environment."""
    super(TestCase, self).setUp()

    # Create default notifier
    self.notifier = fake_notifier.get_fake_notifier()

    # Mock rpc get notifier with fake notifier method that joins all
    # notifications with the default notifier
    self.patch('cinder.rpc.get_notifier',
               side_effect=self._get_joined_notifier)

    if self.MOCK_WORKER:
        # Mock worker creation for all tests that don't care about it
        clean_path = 'cinder.objects.cleanable.CinderCleanableObject.%s'
        for method in ('create_worker', 'set_worker', 'unset_worker'):
            self.patch(clean_path % method, return_value=None)

    if self.MOCK_TOOZ:
        self.patch('cinder.coordination.Coordinator.start')
        self.patch('cinder.coordination.Coordinator.stop')
        self.patch('cinder.coordination.Coordinator.get_lock')

    # Unit tests do not need to use lazy gettext
    i18n.enable_lazy(False)

    test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
    try:
        test_timeout = int(test_timeout)
    except ValueError:
        # If timeout value is invalid do not set a timeout.
        test_timeout = 0
    if test_timeout > 0:
        self.useFixture(fixtures.Timeout(test_timeout, gentle=True))

    self.useFixture(fixtures.NestedTempfile())
    self.useFixture(fixtures.TempHomeDir())

    environ_enabled = (lambda var_name:
                       strutils.bool_from_string(os.environ.get(var_name)))
    if environ_enabled('OS_STDOUT_CAPTURE'):
        stdout = self.useFixture(fixtures.StringStream('stdout')).stream
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
    if environ_enabled('OS_STDERR_CAPTURE'):
        stderr = self.useFixture(fixtures.StringStream('stderr')).stream
        self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

    self.useFixture(log_fixture.get_logging_handle_error_fixture())
    self.useFixture(cinder_fixtures.StandardLogging())

    rpc.add_extra_exmods("cinder.tests.unit")
    self.addCleanup(rpc.clear_extra_exmods)
    self.addCleanup(rpc.cleanup)

    self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
    self.messaging_conf.transport_url = 'fake:/'
    self.messaging_conf.response_timeout = 15
    self.useFixture(self.messaging_conf)

    # Load oslo_messaging_notifications config group so we can set an
    # override to prevent notifications from being ignored due to the
    # short-circuit mechanism.
    oslo_messaging.get_notification_transport(CONF)

    # We need to use a valid driver for the notifications, so we use test.
    self.override_config('driver', ['test'],
                         group='oslo_messaging_notifications')
    rpc.init(CONF)

    # NOTE(geguileo): This is required because _determine_obj_version_cap
    # and _determine_rpc_version_cap functions in cinder.rpc.RPCAPI cache
    # versions in LAST_RPC_VERSIONS and LAST_OBJ_VERSIONS so we may have
    # weird interactions between tests if we don't clear them before each
    # test.
    rpc.LAST_OBJ_VERSIONS = {}
    rpc.LAST_RPC_VERSIONS = {}

    # Init AuthProtocol to register some base options first, such as
    # auth_url.
    auth_token.AuthProtocol('fake_app',
                            {'auth_type': 'password',
                             'auth_url': 'fake_url'})

    conf_fixture.set_defaults(CONF)
    CONF([], default_config_files=[])

    # NOTE(vish): We need a better method for creating fixtures for tests
    #             now that we have some required db setup for the system
    #             to work properly.
    self.start = timeutils.utcnow()

    CONF.set_default('connection', 'sqlite://', 'database')
    CONF.set_default('sqlite_synchronous', False, 'database')

    global _DB_CACHE
    if not _DB_CACHE:
        _DB_CACHE = Database(sqla_api, migration,
                             sql_connection=CONF.database.connection)
    self.useFixture(_DB_CACHE)

    # NOTE(blk-u): WarningsFixture must be after the Database fixture
    # because sqlalchemy-migrate messes with the warnings filters.
    self.useFixture(cinder_fixtures.WarningsFixture())

    # NOTE(danms): Make sure to reset us back to non-remote objects
    # for each test to avoid interactions. Also, backup the object
    # registry.
    objects_base.CinderObject.indirection_api = None
    self._base_test_obj_backup = copy.copy(
        objects_base.CinderObjectRegistry._registry._obj_classes)
    self.addCleanup(self._restore_obj_registry)

    self.addCleanup(CONF.reset)
    self.addCleanup(self._common_cleanup)
    self.injected = []
    self._services = []

    fake_notifier.mock_notifier(self)

    # This will be cleaned up by the NestedTempfile fixture
    lock_path = self.useFixture(fixtures.TempDir()).path
    self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
    self.fixture.config(lock_path=lock_path, group='oslo_concurrency')
    lockutils.set_defaults(lock_path)

    self.override_config(
        'policy_file',
        os.path.join(
            os.path.abspath(os.path.join(os.path.dirname(__file__), '..')),
            self.POLICY_PATH),
        group='oslo_policy')
    self.override_config(
        'resource_query_filters_file',
        os.path.join(
            os.path.abspath(os.path.join(os.path.dirname(__file__), '..')),
            self.RESOURCE_FILTER_PATH))

    self._disable_osprofiler()

    # NOTE(geguileo): This is required because common get_by_id method in
    # cinder.db.sqlalchemy.api caches get methods and if we use a mocked
    # get method in one test it would carry on to the next test. So we
    # clear out the cache.
    sqla_api._GET_METHODS = {}

    self.override_config('backend_url', 'file://' + lock_path,
                         group='coordination')
    coordination.COORDINATOR.start()
    self.addCleanup(coordination.COORDINATOR.stop)

    if six.PY3:
        # TODO(smcginnis) Python 3 deprecates assertRaisesRegexp to
        # assertRaisesRegex, but Python 2 does not have the new name. This
        # can be removed once we stop supporting py2 or the new name is
        # added.
        self.assertRaisesRegexp = self.assertRaisesRegex

    # Ensure we have the default tpool size value and we don't carry
    # threads from other test runs.
    tpool.killall()
    tpool._nthreads = 20

    # NOTE(mikal): make sure we don't load a privsep helper accidentally
    self.useFixture(cinder_fixtures.PrivsepNoHelperFixture())
def test_get_default(self):
    lockutils.set_defaults(lock_path='/the/path')
    self.assertEqual('/the/path', lockutils.get_lock_path(self.conf))
def __init__(self, name):
    lockutils.set_defaults(lock_path='/tmp')
    super(LockFixture, self).__init__(name, 'coverage-db-lock-')
async def init(args):
    LOG.info("Starting DEEP Dashboard...")

    runtime_dir = pathlib.Path(CONF.runtime_dir)
    runtime_dir.mkdir(parents=True, exist_ok=True)

    app = web.Application(debug=True)
    app.runtime_dir = runtime_dir
    lockutils.set_defaults(runtime_dir)

    tpl_path = pathlib.Path(__file__).parent / "templates"
    aiohttp_jinja2.setup(
        app,
        context_processors=[aiohttp_session_flash.context_processor],
        loader=jinja2.FileSystemLoader(tpl_path)
    )
    app.iam_client = auth.get_iam_client()

    base.routes.static('/static', CONF.static_path, name="static")
    app.add_routes(base.routes)
    app.add_routes(deployments.routes)
    app.add_routes(modules.routes)

    if CONF.cache.memcached_ip:
        loop = asyncio.get_event_loop()
        mc = aiomcache.Client(CONF.cache.memcached_ip,
                              CONF.cache.memcached_port,
                              loop=loop)
        sess_storage = aiohttp_session.memcached_storage.MemcachedStorage(
            mc,
            cookie_name='DEEPDASHBOARD_M'
        )
        aiocache.caches.add('default', {
            'cache': "aiocache.MemcachedCache",
            'endpoint': CONF.cache.memcached_ip,
            'port': CONF.cache.memcached_port,
        })
    else:
        LOG.warning("Not using memcached, unexpected behaviour when running "
                    "more than one worker!")

        # secret_key must be 32 url-safe base64-encoded bytes
        fernet_key = fernet.Fernet.generate_key()
        secret_key = base64.urlsafe_b64decode(fernet_key)
        sess_storage = aiohttp_session.cookie_storage.EncryptedCookieStorage(
            secret_key,
            cookie_name='DEEPDASHBOARD_E'
        )
        aiocache.caches.add('default', {
            'cache': "aiocache.SimpleMemoryCache",
        })

    aiohttp_session.setup(app, sess_storage)

    policy = aiohttp_security.SessionIdentityPolicy()
    aiohttp_security.setup(app, policy, auth.IamAuthorizationPolicy())

    app.middlewares.append(meta_middleware)
    app.middlewares.append(aiohttp_session_flash.middleware)
    app.middlewares.append(auth.auth_middleware)
    app.middlewares.append(error_middleware)

    app.modules = {}

    cache = aiocache.caches.get("default")
    app.cache = collections.namedtuple(
        "Cache",
        ["modules", "tosca_templates"],
        defaults=[CacheManager(cache, "modules"),
                  CacheManager(cache, "tosca")]
    )()

    app.scheduler = await aiojobs.create_scheduler()
    app.pool = concurrent.futures.ThreadPoolExecutor()

    app.on_startup.append(deep_oc.download_catalog)
    app.on_startup.append(deep_oc.load_catalog)

    return app
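# Hedged sketch (illustrative, not from the dashboard source): lockutils
# locks are synchronous, so inside aiohttp handlers it is safest to take
# them in the thread pool created above. 'catalog-refresh' is an assumed
# lock name.
import asyncio

from oslo_concurrency import lockutils


def _refresh_catalog_locked():
    with lockutils.lock('catalog-refresh', external=True):
        pass  # touch shared on-disk state under the file lock


async def refresh_catalog(app):
    loop = asyncio.get_running_loop()
    await loop.run_in_executor(app.pool, _refresh_catalog_locked)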
def setUp(self):
    """Run before each test method to initialize test environment."""
    super(TestCase, self).setUp()

    # Import cinder objects for test cases
    objects.register_all()

    # Unit tests do not need to use lazy gettext
    i18n.enable_lazy(False)

    test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
    try:
        test_timeout = int(test_timeout)
    except ValueError:
        # If timeout value is invalid do not set a timeout.
        test_timeout = 0
    if test_timeout > 0:
        self.useFixture(fixtures.Timeout(test_timeout, gentle=True))

    self.useFixture(fixtures.NestedTempfile())
    self.useFixture(fixtures.TempHomeDir())

    environ_enabled = (lambda var_name:
                       strutils.bool_from_string(os.environ.get(var_name)))
    if environ_enabled('OS_STDOUT_CAPTURE'):
        stdout = self.useFixture(fixtures.StringStream('stdout')).stream
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
    if environ_enabled('OS_STDERR_CAPTURE'):
        stderr = self.useFixture(fixtures.StringStream('stderr')).stream
        self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
    if environ_enabled('OS_LOG_CAPTURE'):
        log_format = '%(levelname)s [%(name)s] %(message)s'
        if environ_enabled('OS_DEBUG'):
            level = logging.DEBUG
        else:
            level = logging.INFO
        self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
                                               format=log_format,
                                               level=level))

    rpc.add_extra_exmods("cinder.tests.unit")
    self.addCleanup(rpc.clear_extra_exmods)
    self.addCleanup(rpc.cleanup)

    self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
    self.messaging_conf.transport_driver = 'fake'
    self.messaging_conf.response_timeout = 15
    self.useFixture(self.messaging_conf)
    rpc.init(CONF)

    conf_fixture.set_defaults(CONF)
    CONF([], default_config_files=[])

    # NOTE(vish): We need a better method for creating fixtures for tests
    #             now that we have some required db setup for the system
    #             to work properly.
    self.start = timeutils.utcnow()

    CONF.set_default('connection', 'sqlite://', 'database')
    CONF.set_default('sqlite_synchronous', False, 'database')

    global _DB_CACHE
    if not _DB_CACHE:
        _DB_CACHE = Database(sqla_api, migration,
                             sql_connection=CONF.database.connection,
                             sqlite_db=CONF.database.sqlite_db,
                             sqlite_clean_db=CONF.sqlite_clean_db)
    self.useFixture(_DB_CACHE)

    # emulate some of the mox stuff, we can't use the metaclass
    # because it screws with our generators
    self.mox = mox.Mox()
    self.stubs = stubout.StubOutForTesting()
    self.addCleanup(CONF.reset)
    self.addCleanup(self.mox.UnsetStubs)
    self.addCleanup(self.stubs.UnsetAll)
    self.addCleanup(self.stubs.SmartUnsetAll)
    self.addCleanup(self.mox.VerifyAll)
    self.addCleanup(self._common_cleanup)
    self.injected = []
    self._services = []

    fake_notifier.stub_notifier(self.stubs)

    self.override_config('fatal_exception_format_errors', True)

    # This will be cleaned up by the NestedTempfile fixture
    lock_path = self.useFixture(fixtures.TempDir()).path
    self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
    self.fixture.config(lock_path=lock_path, group='oslo_concurrency')
    lockutils.set_defaults(lock_path)

    self.override_config(
        'policy_file',
        os.path.join(
            os.path.abspath(os.path.join(os.path.dirname(__file__), '..')),
            'cinder/tests/unit/policy.json'))
def setUp(self):
    super(MultiIsolatedUnitTest, self).setUp()
    options.set_defaults(CONF, connection='sqlite://')
    lockutils.set_defaults(os.path.join(self.test_dir))
    self.config(debug=False)
def setUp(self):
    """Run before each test method to initialize test environment."""
    super(TestCase, self).setUp()

    # Create default notifier
    self.notifier = fake_notifier.get_fake_notifier()

    # Mock rpc get notifier with fake notifier method that joins all
    # notifications with the default notifier
    self.patch('cinder.rpc.get_notifier',
               side_effect=self._get_joined_notifier)

    if self.MOCK_WORKER:
        # Mock worker creation for all tests that don't care about it
        clean_path = 'cinder.objects.cleanable.CinderCleanableObject.%s'
        for method in ('create_worker', 'set_worker', 'unset_worker'):
            self.patch(clean_path % method, return_value=None)

    if self.MOCK_TOOZ:
        self.patch('cinder.coordination.Coordinator.start')
        self.patch('cinder.coordination.Coordinator.stop')
        self.patch('cinder.coordination.Coordinator.get_lock')

    # Unit tests do not need to use lazy gettext
    i18n.enable_lazy(False)

    test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
    try:
        test_timeout = int(test_timeout)
    except ValueError:
        # If timeout value is invalid do not set a timeout.
        test_timeout = 0
    if test_timeout > 0:
        self.useFixture(fixtures.Timeout(test_timeout, gentle=True))

    self.useFixture(fixtures.NestedTempfile())
    self.useFixture(fixtures.TempHomeDir())

    environ_enabled = (lambda var_name:
                       strutils.bool_from_string(os.environ.get(var_name)))
    if environ_enabled('OS_STDOUT_CAPTURE'):
        stdout = self.useFixture(fixtures.StringStream('stdout')).stream
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
    if environ_enabled('OS_STDERR_CAPTURE'):
        stderr = self.useFixture(fixtures.StringStream('stderr')).stream
        self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

    self.useFixture(log_fixture.get_logging_handle_error_fixture())
    self.useFixture(cinder_fixtures.StandardLogging())

    rpc.add_extra_exmods("cinder.tests.unit")
    self.addCleanup(rpc.clear_extra_exmods)
    self.addCleanup(rpc.cleanup)

    self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
    self.messaging_conf.transport_driver = 'fake'
    self.messaging_conf.response_timeout = 15
    self.useFixture(self.messaging_conf)

    # Load oslo_messaging_notifications config group so we can set an
    # override to prevent notifications from being ignored due to the
    # short-circuit mechanism.
    oslo_messaging.get_notification_transport(CONF)

    # We need to use a valid driver for the notifications, so we use test.
    self.override_config('driver', ['test'],
                         group='oslo_messaging_notifications')
    rpc.init(CONF)

    # NOTE(geguileo): This is required because _determine_obj_version_cap
    # and _determine_rpc_version_cap functions in cinder.rpc.RPCAPI cache
    # versions in LAST_RPC_VERSIONS and LAST_OBJ_VERSIONS so we may have
    # weird interactions between tests if we don't clear them before each
    # test.
    rpc.LAST_OBJ_VERSIONS = {}
    rpc.LAST_RPC_VERSIONS = {}

    conf_fixture.set_defaults(CONF)
    CONF([], default_config_files=[])

    # NOTE(vish): We need a better method for creating fixtures for tests
    #             now that we have some required db setup for the system
    #             to work properly.
    self.start = timeutils.utcnow()

    CONF.set_default('connection', 'sqlite://', 'database')
    CONF.set_default('sqlite_synchronous', False, 'database')

    global _DB_CACHE
    if not _DB_CACHE:
        _DB_CACHE = Database(sqla_api, migration,
                             sql_connection=CONF.database.connection)
    self.useFixture(_DB_CACHE)

    # NOTE(blk-u): WarningsFixture must be after the Database fixture
    # because sqlalchemy-migrate messes with the warnings filters.
    self.useFixture(cinder_fixtures.WarningsFixture())

    # NOTE(danms): Make sure to reset us back to non-remote objects
    # for each test to avoid interactions. Also, backup the object
    # registry.
    objects_base.CinderObject.indirection_api = None
    self._base_test_obj_backup = copy.copy(
        objects_base.CinderObjectRegistry._registry._obj_classes)
    self.addCleanup(self._restore_obj_registry)

    self.addCleanup(CONF.reset)
    self.addCleanup(self._common_cleanup)
    self.injected = []
    self._services = []

    fake_notifier.mock_notifier(self)

    # This will be cleaned up by the NestedTempfile fixture
    lock_path = self.useFixture(fixtures.TempDir()).path
    self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
    self.fixture.config(lock_path=lock_path, group='oslo_concurrency')
    lockutils.set_defaults(lock_path)

    self.override_config(
        'policy_file',
        os.path.join(
            os.path.abspath(os.path.join(os.path.dirname(__file__), '..')),
            self.POLICY_PATH),
        group='oslo_policy')
    self.override_config(
        'resource_query_filters_file',
        os.path.join(
            os.path.abspath(os.path.join(os.path.dirname(__file__), '..')),
            self.RESOURCE_FILTER_PATH))

    self._disable_osprofiler()

    # NOTE(geguileo): This is required because common get_by_id method in
    # cinder.db.sqlalchemy.api caches get methods and if we use a mocked
    # get method in one test it would carry on to the next test. So we
    # clear out the cache.
    sqla_api._GET_METHODS = {}

    self.override_config('backend_url', 'file://' + lock_path,
                         group='coordination')
    coordination.COORDINATOR.start()
    self.addCleanup(coordination.COORDINATOR.stop)

    if six.PY3:
        # TODO(smcginnis) Python 3 deprecates assertRaisesRegexp to
        # assertRaisesRegex, but Python 2 does not have the new name. This
        # can be removed once we stop supporting py2 or the new name is
        # added.
        self.assertRaisesRegexp = self.assertRaisesRegex

    # Ensure we have the default tpool size value and we don't carry
    # threads from other test runs.
    tpool.killall()
    tpool._nthreads = 20
def __init__(self, name):
    lockutils.set_defaults(lock_path='/tmp')
    super(LockFixture, self).__init__(name, 'subunit-db-lock-')
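# Both subclasses above only pin a lock-file prefix; the base class is
# oslo.concurrency's test fixture, which holds an external file lock for the
# fixture's lifetime. A sketch, assuming the oslo_concurrency.fixture module
# and an illustrative class name and prefix:
from oslo_concurrency import lockutils
from oslo_concurrency.fixture import lockutils as fixture_lockutils


class DBLockFixture(fixture_lockutils.LockFixture):
    def __init__(self, name):
        lockutils.set_defaults(lock_path='/tmp')
        super(DBLockFixture, self).__init__(name, 'db-lock-')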
def setUp(self):
    """Run before each test method to initialize test environment."""
    super(TestCase, self).setUp()

    # Create default notifier
    self.notifier = fake_notifier.get_fake_notifier()

    # Mock rpc get notifier with fake notifier method that joins all
    # notifications with the default notifier
    p = mock.patch('cinder.rpc.get_notifier',
                   side_effect=self._get_joined_notifier)
    p.start()

    # Unit tests do not need to use lazy gettext
    i18n.enable_lazy(False)

    test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
    try:
        test_timeout = int(test_timeout)
    except ValueError:
        # If timeout value is invalid do not set a timeout.
        test_timeout = 0
    if test_timeout > 0:
        self.useFixture(fixtures.Timeout(test_timeout, gentle=True))

    self.useFixture(fixtures.NestedTempfile())
    self.useFixture(fixtures.TempHomeDir())

    environ_enabled = (lambda var_name:
                       strutils.bool_from_string(os.environ.get(var_name)))
    if environ_enabled('OS_STDOUT_CAPTURE'):
        stdout = self.useFixture(fixtures.StringStream('stdout')).stream
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
    if environ_enabled('OS_STDERR_CAPTURE'):
        stderr = self.useFixture(fixtures.StringStream('stderr')).stream
        self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

    self.useFixture(log_fixture.get_logging_handle_error_fixture())
    self.useFixture(cinder_fixtures.StandardLogging())

    rpc.add_extra_exmods("cinder.tests.unit")
    self.addCleanup(rpc.clear_extra_exmods)
    self.addCleanup(rpc.cleanup)

    self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
    self.messaging_conf.transport_driver = 'fake'
    self.messaging_conf.response_timeout = 15
    self.useFixture(self.messaging_conf)
    rpc.init(CONF)

    conf_fixture.set_defaults(CONF)
    CONF([], default_config_files=[])

    # NOTE(vish): We need a better method for creating fixtures for tests
    #             now that we have some required db setup for the system
    #             to work properly.
    self.start = timeutils.utcnow()

    CONF.set_default('connection', 'sqlite://', 'database')
    CONF.set_default('sqlite_synchronous', False, 'database')

    global _DB_CACHE
    if not _DB_CACHE:
        _DB_CACHE = Database(sqla_api, migration,
                             sql_connection=CONF.database.connection,
                             sqlite_db=CONF.database.sqlite_db,
                             sqlite_clean_db=CONF.sqlite_clean_db)
    self.useFixture(_DB_CACHE)

    # NOTE(danms): Make sure to reset us back to non-remote objects
    # for each test to avoid interactions. Also, backup the object
    # registry.
    objects_base.CinderObject.indirection_api = None
    self._base_test_obj_backup = copy.copy(
        objects_base.CinderObjectRegistry._registry._obj_classes)
    self.addCleanup(self._restore_obj_registry)

    # emulate some of the mox stuff, we can't use the metaclass
    # because it screws with our generators
    mox_fixture = self.useFixture(moxstubout.MoxStubout())
    self.mox = mox_fixture.mox
    self.stubs = mox_fixture.stubs
    self.addCleanup(CONF.reset)
    self.addCleanup(self._common_cleanup)
    self.injected = []
    self._services = []

    fake_notifier.stub_notifier(self.stubs)

    self.override_config('fatal_exception_format_errors', True)

    # This will be cleaned up by the NestedTempfile fixture
    lock_path = self.useFixture(fixtures.TempDir()).path
    self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
    self.fixture.config(lock_path=lock_path, group='oslo_concurrency')
    lockutils.set_defaults(lock_path)

    self.override_config(
        'policy_file',
        os.path.join(
            os.path.abspath(os.path.join(os.path.dirname(__file__), '..')),
            'cinder/tests/unit/policy.json'),
        group='oslo_policy')

    self._disable_osprofiler()

    # NOTE(geguileo): This is required because common get_by_id method in
    # cinder.db.sqlalchemy.api caches get methods and if we use a mocked
    # get method in one test it would carry on to the next test. So we
    # clear out the cache.
    sqla_api._GET_METHODS = {}
    # Default SOA Values
    cfg.IntOpt('default-soa-refresh', default=3600),
    cfg.IntOpt('default-soa-retry', default=600),
    cfg.IntOpt('default-soa-expire', default=86400),
    cfg.IntOpt('default-soa-minimum', default=3600),
])

# Set some Oslo Log defaults
log.set_defaults(default_log_levels=[
    'amqplib=WARN',
    'amqp=WARN',
    'boto=WARN',
    'eventlet.wsgi.server=WARN',
    'iso8601=WARN',
    'kazoo.client=WARN',
    'keystone=INFO',
    'keystonemiddleware.auth_token=INFO',
    'oslo_messaging=WARN',
    'oslo_service.loopingcall=WARN',
    'sqlalchemy=WARN',
    'stevedore=WARN',
    'suds=INFO',
])

# Set some Oslo RPC defaults
messaging.set_transport_defaults('designate')

# Set some Oslo Concurrency defaults
lockutils.set_defaults(lock_path='$state_path')
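# Note on '$state_path' above: oslo.config resolves $-references when the
# option is read, so the effective lock directory tracks whatever state_path
# is configured to. A hedged round-trip sketch (the state_path default below
# is an assumption, not designate's actual value):
from oslo_concurrency import lockutils
from oslo_config import cfg

conf = cfg.ConfigOpts()
conf.register_opts([cfg.StrOpt('state_path',
                               default='/var/lib/designate')])
lockutils.set_defaults(lock_path='$state_path')
conf([])
print(lockutils.get_lock_path(conf))  # -> /var/lib/designate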
def setUp(self):
    """Run before each test method to initialize test environment."""
    super(TestCase, self).setUp()

    # Create default notifier
    self.notifier = fake_notifier.get_fake_notifier()

    # Mock rpc get notifier with fake notifier method that joins all
    # notifications with the default notifier
    self.patch('cinder.rpc.get_notifier',
               side_effect=self._get_joined_notifier)

    if self.MOCK_WORKER:
        # Mock worker creation for all tests that don't care about it
        clean_path = 'cinder.objects.cleanable.CinderCleanableObject.%s'
        for method in ('create_worker', 'set_worker', 'unset_worker'):
            self.patch(clean_path % method, return_value=None)

    # Unit tests do not need to use lazy gettext
    i18n.enable_lazy(False)

    test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
    try:
        test_timeout = int(test_timeout)
    except ValueError:
        # If timeout value is invalid do not set a timeout.
        test_timeout = 0
    if test_timeout > 0:
        self.useFixture(fixtures.Timeout(test_timeout, gentle=True))

    self.useFixture(fixtures.NestedTempfile())
    self.useFixture(fixtures.TempHomeDir())

    environ_enabled = (lambda var_name:
                       strutils.bool_from_string(os.environ.get(var_name)))
    if environ_enabled('OS_STDOUT_CAPTURE'):
        stdout = self.useFixture(fixtures.StringStream('stdout')).stream
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
    if environ_enabled('OS_STDERR_CAPTURE'):
        stderr = self.useFixture(fixtures.StringStream('stderr')).stream
        self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

    self.useFixture(log_fixture.get_logging_handle_error_fixture())
    self.useFixture(cinder_fixtures.StandardLogging())

    rpc.add_extra_exmods("cinder.tests.unit")
    self.addCleanup(rpc.clear_extra_exmods)
    self.addCleanup(rpc.cleanup)

    self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
    self.messaging_conf.transport_driver = 'fake'
    self.messaging_conf.response_timeout = 15
    self.useFixture(self.messaging_conf)

    # Load oslo_messaging_notifications config group so we can set an
    # override to prevent notifications from being ignored due to the
    # short-circuit mechanism.
    oslo_messaging.get_notification_transport(CONF)

    # We need to use a valid driver for the notifications, so we use test.
    self.override_config('driver', ['test'],
                         group='oslo_messaging_notifications')
    rpc.init(CONF)

    # NOTE(geguileo): This is required because _determine_obj_version_cap
    # and _determine_rpc_version_cap functions in cinder.rpc.RPCAPI cache
    # versions in LAST_RPC_VERSIONS and LAST_OBJ_VERSIONS so we may have
    # weird interactions between tests if we don't clear them before each
    # test.
    rpc.LAST_OBJ_VERSIONS = {}
    rpc.LAST_RPC_VERSIONS = {}

    conf_fixture.set_defaults(CONF)
    CONF([], default_config_files=[])

    # NOTE(vish): We need a better method for creating fixtures for tests
    #             now that we have some required db setup for the system
    #             to work properly.
    self.start = timeutils.utcnow()

    CONF.set_default('connection', 'sqlite://', 'database')
    CONF.set_default('sqlite_synchronous', False, 'database')

    global _DB_CACHE
    if not _DB_CACHE:
        _DB_CACHE = Database(sqla_api, migration,
                             sql_connection=CONF.database.connection)
    self.useFixture(_DB_CACHE)

    # NOTE(danms): Make sure to reset us back to non-remote objects
    # for each test to avoid interactions. Also, backup the object
    # registry.
    objects_base.CinderObject.indirection_api = None
    self._base_test_obj_backup = copy.copy(
        objects_base.CinderObjectRegistry._registry._obj_classes)
    self.addCleanup(self._restore_obj_registry)

    # emulate some of the mox stuff, we can't use the metaclass
    # because it screws with our generators
    mox_fixture = self.useFixture(moxstubout.MoxStubout())
    self.mox = mox_fixture.mox
    self.stubs = mox_fixture.stubs
    self.addCleanup(CONF.reset)
    self.addCleanup(self._common_cleanup)
    self.injected = []
    self._services = []

    fake_notifier.mock_notifier(self)

    self.override_config('fatal_exception_format_errors', True)

    # This will be cleaned up by the NestedTempfile fixture
    lock_path = self.useFixture(fixtures.TempDir()).path
    self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
    self.fixture.config(lock_path=lock_path, group='oslo_concurrency')
    lockutils.set_defaults(lock_path)

    self.override_config(
        'policy_file',
        os.path.join(
            os.path.abspath(os.path.join(os.path.dirname(__file__), '..')),
            self.POLICY_PATH),
        group='oslo_policy')

    self._disable_osprofiler()
    self._disallow_invalid_uuids()

    # NOTE(geguileo): This is required because common get_by_id method in
    # cinder.db.sqlalchemy.api caches get methods and if we use a mocked
    # get method in one test it would carry on to the next test. So we
    # clear out the cache.
    sqla_api._GET_METHODS = {}

    self.override_config('backend_url', 'file://' + lock_path,
                         group='coordination')
    coordination.COORDINATOR.start()
    self.addCleanup(coordination.COORDINATOR.stop)
def setUp(self):
    """Run before each test method to initialize test environment."""
    super(TestCase, self).setUp()

    # Create default notifier
    self.notifier = fake_notifier.get_fake_notifier()

    # Mock rpc get notifier with fake notifier method that joins all
    # notifications with the default notifier
    p = mock.patch("cinder.rpc.get_notifier",
                   side_effect=self._get_joined_notifier)
    p.start()

    # Unit tests do not need to use lazy gettext
    i18n.enable_lazy(False)

    test_timeout = os.environ.get("OS_TEST_TIMEOUT", 0)
    try:
        test_timeout = int(test_timeout)
    except ValueError:
        # If timeout value is invalid do not set a timeout.
        test_timeout = 0
    if test_timeout > 0:
        self.useFixture(fixtures.Timeout(test_timeout, gentle=True))

    self.useFixture(fixtures.NestedTempfile())
    self.useFixture(fixtures.TempHomeDir())

    environ_enabled = (lambda var_name:
                       strutils.bool_from_string(os.environ.get(var_name)))
    if environ_enabled("OS_STDOUT_CAPTURE"):
        stdout = self.useFixture(fixtures.StringStream("stdout")).stream
        self.useFixture(fixtures.MonkeyPatch("sys.stdout", stdout))
    if environ_enabled("OS_STDERR_CAPTURE"):
        stderr = self.useFixture(fixtures.StringStream("stderr")).stream
        self.useFixture(fixtures.MonkeyPatch("sys.stderr", stderr))
    if environ_enabled("OS_LOG_CAPTURE"):
        log_format = "%(levelname)s [%(name)s] %(message)s"
        if environ_enabled("OS_DEBUG"):
            level = logging.DEBUG
        else:
            level = logging.INFO
        self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
                                               format=log_format,
                                               level=level))

    rpc.add_extra_exmods("cinder.tests.unit")
    self.addCleanup(rpc.clear_extra_exmods)
    self.addCleanup(rpc.cleanup)

    self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
    self.messaging_conf.transport_driver = "fake"
    self.messaging_conf.response_timeout = 15
    self.useFixture(self.messaging_conf)
    rpc.init(CONF)

    conf_fixture.set_defaults(CONF)
    CONF([], default_config_files=[])

    # NOTE(vish): We need a better method for creating fixtures for tests
    #             now that we have some required db setup for the system
    #             to work properly.
    self.start = timeutils.utcnow()

    CONF.set_default("connection", "sqlite://", "database")
    CONF.set_default("sqlite_synchronous", False, "database")

    global _DB_CACHE
    if not _DB_CACHE:
        _DB_CACHE = Database(
            sqla_api,
            migration,
            sql_connection=CONF.database.connection,
            sqlite_db=CONF.database.sqlite_db,
            sqlite_clean_db=CONF.sqlite_clean_db,
        )
    self.useFixture(_DB_CACHE)

    # NOTE(danms): Make sure to reset us back to non-remote objects
    # for each test to avoid interactions. Also, backup the object
    # registry.
    objects_base.CinderObject.indirection_api = None
    self._base_test_obj_backup = copy.copy(
        objects_base.CinderObjectRegistry._registry._obj_classes)
    self.addCleanup(self._restore_obj_registry)

    # emulate some of the mox stuff, we can't use the metaclass
    # because it screws with our generators
    mox_fixture = self.useFixture(moxstubout.MoxStubout())
    self.mox = mox_fixture.mox
    self.stubs = mox_fixture.stubs
    self.addCleanup(CONF.reset)
    self.addCleanup(self._common_cleanup)
    self.injected = []
    self._services = []

    fake_notifier.stub_notifier(self.stubs)

    self.override_config("fatal_exception_format_errors", True)

    # This will be cleaned up by the NestedTempfile fixture
    lock_path = self.useFixture(fixtures.TempDir()).path
    self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
    self.fixture.config(lock_path=lock_path, group="oslo_concurrency")
    lockutils.set_defaults(lock_path)

    self.override_config(
        "policy_file",
        os.path.join(
            os.path.abspath(os.path.join(os.path.dirname(__file__), "..")),
            "cinder/tests/unit/policy.json"),
        group="oslo_policy",
    )

    self._disable_osprofiler()
def setUp(self):
    """Run before each test method to initialize test environment."""
    super(TestCase, self).setUp()

    # Create default notifier
    self.notifier = fake_notifier.get_fake_notifier()

    # Mock rpc get notifier with fake notifier method that joins all
    # notifications with the default notifier
    p = mock.patch('cinder.rpc.get_notifier',
                   side_effect=self._get_joined_notifier)
    p.start()

    # Unit tests do not need to use lazy gettext
    i18n.enable_lazy(False)

    test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
    try:
        test_timeout = int(test_timeout)
    except ValueError:
        # If timeout value is invalid do not set a timeout.
        test_timeout = 0
    if test_timeout > 0:
        self.useFixture(fixtures.Timeout(test_timeout, gentle=True))

    self.useFixture(fixtures.NestedTempfile())
    self.useFixture(fixtures.TempHomeDir())

    environ_enabled = (lambda var_name:
                       strutils.bool_from_string(os.environ.get(var_name)))
    if environ_enabled('OS_STDOUT_CAPTURE'):
        stdout = self.useFixture(fixtures.StringStream('stdout')).stream
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
    if environ_enabled('OS_STDERR_CAPTURE'):
        stderr = self.useFixture(fixtures.StringStream('stderr')).stream
        self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

    self.useFixture(log_fixture.get_logging_handle_error_fixture())
    self.useFixture(cinder_fixtures.StandardLogging())

    rpc.add_extra_exmods("cinder.tests.unit")
    self.addCleanup(rpc.clear_extra_exmods)
    self.addCleanup(rpc.cleanup)

    self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
    self.messaging_conf.transport_driver = 'fake'
    self.messaging_conf.response_timeout = 15
    self.useFixture(self.messaging_conf)
    rpc.init(CONF)

    # NOTE(geguileo): This is required because _determine_obj_version_cap
    # and _determine_rpc_version_cap functions in cinder.rpc.RPCAPI cache
    # versions in LAST_RPC_VERSIONS and LAST_OBJ_VERSIONS so we may have
    # weird interactions between tests if we don't clear them before each
    # test.
    rpc.LAST_OBJ_VERSIONS = {}
    rpc.LAST_RPC_VERSIONS = {}

    conf_fixture.set_defaults(CONF)
    CONF([], default_config_files=[])

    # NOTE(vish): We need a better method for creating fixtures for tests
    #             now that we have some required db setup for the system
    #             to work properly.
    self.start = timeutils.utcnow()

    CONF.set_default('connection', 'sqlite://', 'database')
    CONF.set_default('sqlite_synchronous', False, 'database')

    global _DB_CACHE
    if not _DB_CACHE:
        _DB_CACHE = Database(sqla_api, migration,
                             sql_connection=CONF.database.connection,
                             sqlite_db=CONF.database.sqlite_db,
                             sqlite_clean_db='clean.sqlite')
    self.useFixture(_DB_CACHE)

    # NOTE(danms): Make sure to reset us back to non-remote objects
    # for each test to avoid interactions. Also, backup the object
    # registry.
    objects_base.CinderObject.indirection_api = None
    self._base_test_obj_backup = copy.copy(
        objects_base.CinderObjectRegistry._registry._obj_classes)
    self.addCleanup(self._restore_obj_registry)

    # emulate some of the mox stuff, we can't use the metaclass
    # because it screws with our generators
    mox_fixture = self.useFixture(moxstubout.MoxStubout())
    self.mox = mox_fixture.mox
    self.stubs = mox_fixture.stubs
    self.addCleanup(CONF.reset)
    self.addCleanup(self._common_cleanup)
    self.injected = []
    self._services = []

    fake_notifier.stub_notifier(self.stubs)

    self.override_config('fatal_exception_format_errors', True)

    # This will be cleaned up by the NestedTempfile fixture
    lock_path = self.useFixture(fixtures.TempDir()).path
    self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
    self.fixture.config(lock_path=lock_path, group='oslo_concurrency')
    lockutils.set_defaults(lock_path)

    self.override_config(
        'policy_file',
        os.path.join(
            os.path.abspath(os.path.join(os.path.dirname(__file__), '..')),
            'cinder/tests/unit/policy.json'),
        group='oslo_policy')

    self._disable_osprofiler()

    # NOTE(geguileo): This is required because common get_by_id method in
    # cinder.db.sqlalchemy.api caches get methods and if we use a mocked
    # get method in one test it would carry on to the next test. So we
    # clear out the cache.
    sqla_api._GET_METHODS = {}
    datefmt='%H:%M:%S')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# for debugging
if False:
    file_handler = logging.FileHandler('debug.log')
    file_handler.setFormatter(
        logging.Formatter('%(levelname)-8s %(asctime)-12s %(message)s'))
    logger.addHandler(file_handler)
    logger.setLevel(logging.DEBUG)

user_data_dir = appdirs.user_data_dir('auto_LiRPA')
if not os.path.exists(user_data_dir):
    os.makedirs(user_data_dir)
lockutils.set_defaults(os.path.join(user_data_dir, '.lock'))


class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
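# With the lock directory defaulted to the per-user data dir above, callers
# can serialize work such as cache writes across processes with an external
# file lock. A hedged sketch; 'download-cache' is an assumed lock name:
from oslo_concurrency import lockutils

with lockutils.lock('download-cache', external=True):
    pass  # exactly one process at a time runs this block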