def setUp(self):
    """Prepare a two-backend ('fast'/'cheap') multi-store test config."""
    super(TestMultiStoreBase, self).setUp()
    self.reserved_stores = {'consuming_service_reserved_store': 'file'}
    backends = {"fast": "file", "cheap": "file"}
    self.conf = self._CONF
    self.conf(args=[])
    self.conf.register_opt(cfg.DictOpt('enabled_backends'))
    self.config(enabled_backends=backends)
    store.register_store_opts(self.conf,
                              reserved_stores=self.reserved_stores)
    # NOTE(review): 'file1' is not one of the enabled backends above
    # ('fast'/'cheap') -- confirm this default is intentional.
    self.config(default_backend='file1', group='glance_store')
    # Start from a clean scheme map so earlier tests cannot leak stores,
    # and restore a pristine map plus config after the test.
    location.SCHEME_TO_CLS_BACKEND_MAP = {}
    store.create_multi_stores(self.conf,
                              reserved_stores=self.reserved_stores)
    self.addCleanup(setattr, location, 'SCHEME_TO_CLS_BACKEND_MAP', dict())
    self.addCleanup(self.conf.reset)
def setUp(self): """Establish a clean test environment.""" super(TestSheepdogMultiStore, self).setUp() enabled_backends = { "sheepdog1": "sheepdog", "sheepdog2": "sheepdog", } self.conf = self._CONF self.conf(args=[]) self.conf.register_opt(cfg.DictOpt('enabled_backends')) self.config(enabled_backends=enabled_backends) store.register_store_opts(self.conf) self.config(default_backend='sheepdog1', group='glance_store') # mock sheepdog commands def _fake_execute(*cmd, **kwargs): pass execute = mock.patch.object(processutils, 'execute').start() execute.side_effect = _fake_execute self.addCleanup(execute.stop) # Ensure stores + locations cleared location.SCHEME_TO_CLS_BACKEND_MAP = {} store.create_multi_stores(self.conf) self.addCleanup(setattr, location, 'SCHEME_TO_CLS_BACKEND_MAP', dict()) self.addCleanup(self.conf.reset) self.store = sheepdog.Store(self.conf, backend='sheepdog1') self.store.configure() self.store_specs = {'image': '6bd59e6e-c410-11e5-ab67-0a73f1fda51b', 'addr': '127.0.0.1', 'port': 7000}
def setUp(self):
    """Build a copy-image import task plus mocked repos and stores."""
    super(TestCopyImageTask, self).setUp()
    self.db = unit_test_utils.FakeDB(initialize=False)
    self._create_images()
    self.image_repo = mock.MagicMock()
    self.task_repo = mock.MagicMock()
    self.staging_store = mock.MagicMock()
    self.image_id = UUID1

    # A 'copy-image' import request targeting the 'fast' store.
    request = {
        'method': {'name': 'copy-image'},
        'stores': ['fast'],
    }
    self.task_type = 'import'
    self.task_factory = domain.TaskFactory()
    self.task = self.task_factory.new_task(
        self.task_type, TENANT1,
        task_time_to_live=CONF.task.task_time_to_live,
        task_input={"import_req": request})

    # Two file-backed stores, with 'fast' as the default backend.
    self.config(enabled_backends={'cheap': 'file', 'fast': 'file'})
    store_api.register_store_opts(CONF, reserved_stores=RESERVED_STORES)
    self.config(default_backend='fast', group='glance_store')
    store_api.create_multi_stores(CONF, reserved_stores=RESERVED_STORES)
def init_app():
    """Initialize the Glance API application for a WSGI server.

    Loads configuration and logging, switches to the native threadpool
    model, initializes the store backends (multi-store or legacy
    single-store), and returns the loaded paste application.
    """
    config.set_config_defaults()
    config_files = _get_config_files()
    CONF([], project='glance', default_config_files=config_files)
    logging.setup(CONF, "glance")

    # NOTE(danms): We are running inside uwsgi or mod_wsgi, so no eventlet;
    # use native threading instead.
    glance.async_.set_threadpool_model('native')
    # Drain any outstanding threadpool work when the interpreter exits.
    atexit.register(drain_threadpools)

    # NOTE(danms): Change the default threadpool size since we
    # are dealing with native threads and not greenthreads.
    # Right now, the only pool of default size is tasks_pool,
    # so if others are created this will need to change to be
    # more specific.
    common.DEFAULT_POOL_SIZE = CONF.wsgi.task_pool_threads

    if CONF.enabled_backends:
        # Operators must not use the reserved 'os_glance_' prefix for
        # their own backends; fail fast if they do.
        if store_utils.check_reserved_stores(CONF.enabled_backends):
            msg = _("'os_glance_' prefix should not be used in "
                    "enabled_backends config option. It is reserved "
                    "for internal use only.")
            raise RuntimeError(msg)
        glance_store.register_store_opts(CONF,
                                         reserved_stores=RESERVED_STORES)
        glance_store.create_multi_stores(CONF,
                                         reserved_stores=RESERVED_STORES)
        glance_store.verify_store()
    else:
        # Legacy single-store configuration.
        glance_store.register_opts(CONF)
        glance_store.create_stores(CONF)
        glance_store.verify_default_store()

    _setup_os_profiler()
    return config.load_paste_app('glance-api')
def setUp(self): """Establish a clean test environment.""" super(TestMultiStore, self).setUp() self.enabled_backends = { "file1": "file", "file2": "file", } self.conf = self._CONF self.conf(args=[]) self.conf.register_opt(cfg.DictOpt('enabled_backends')) self.config(enabled_backends=self.enabled_backends) store.register_store_opts(self.conf) self.config(default_backend='file1', group='glance_store') # Ensure stores + locations cleared location.SCHEME_TO_CLS_BACKEND_MAP = {} store.create_multi_stores(self.conf) self.addCleanup(setattr, location, 'SCHEME_TO_CLS_BACKEND_MAP', dict()) self.test_dir = self.useFixture(fixtures.TempDir()).path self.addCleanup(self.conf.reset) self.store = filesystem.Store(self.conf, backend='file1') self.config(filesystem_store_datadir=self.test_dir, filesystem_store_chunk_size=10, group="file1") self.store.configure() self.register_store_backend_schemes(self.store, 'file', 'file1')
def setup_multiple_stores(self, mock_client):
    """Configures multiple backend stores.

    This configures the API with two cinder stores (store1 and store2)
    as well as a os_glance_staging_store for imports.

    ``mock_client`` is presumably supplied by a ``mock.patch``
    decorator on the caller -- it is not used here; confirm.
    """
    self.config(show_multiple_locations=True)
    self.config(show_image_direct_url=True)
    self.config(enabled_backends={'store1': 'cinder', 'store2': 'cinder'})
    glance_store.register_store_opts(CONF,
                                     reserved_stores=wsgi.RESERVED_STORES)
    self.config(default_backend='store1', group='glance_store')
    # Credentials and volume type for the first cinder backend.
    self.config(cinder_volume_type='fast', group='store1')
    self.config(cinder_store_user_name='fake_user', group='store1')
    self.config(cinder_store_password='******', group='store1')
    self.config(cinder_store_project_name='fake_project', group='store1')
    self.config(cinder_store_auth_address='http://auth_addr',
                group='store1')
    # Same credentials, different volume type for the second backend.
    self.config(cinder_volume_type='reliable', group='store2')
    self.config(cinder_store_user_name='fake_user', group='store2')
    self.config(cinder_store_password='******', group='store2')
    self.config(cinder_store_project_name='fake_project', group='store2')
    self.config(cinder_store_auth_address='http://auth_addr',
                group='store2')
    # The reserved staging store is file-backed for import staging.
    self.config(filesystem_store_datadir=self._store_dir('staging'),
                group='os_glance_staging_store')
    glance_store.create_multi_stores(CONF,
                                     reserved_stores=wsgi.RESERVED_STORES)
    glance_store.verify_store()
def setUp(self): """Establish a clean test environment.""" super(TestMultiStore, self).setUp() enabled_backends = { "file1": "file", "file2": "file", } self.conf = self._CONF self.conf(args=[]) self.conf.register_opt(cfg.DictOpt('enabled_backends')) self.config(enabled_backends=enabled_backends) store.register_store_opts(self.conf) self.config(default_backend='file1', group='glance_store') # Ensure stores + locations cleared location.SCHEME_TO_CLS_BACKEND_MAP = {} store.create_multi_stores(self.conf) self.addCleanup(setattr, location, 'SCHEME_TO_CLS_BACKEND_MAP', dict()) self.test_dir = self.useFixture(fixtures.TempDir()).path self.addCleanup(self.conf.reset) self.store = filesystem.Store(self.conf, backend='file1') self.config(filesystem_store_datadir=self.test_dir, filesystem_store_chunk_size=10, group="file1") self.store.configure() self.register_store_backend_schemes(self.store, 'file', 'file1')
def _create_multi_stores(self, passing_config=True):
    """Create and configure the known test stores.

    Enables a 'file1' (file) and a 'ceph1' (rbd) backend, makes
    'file1' the default and points it at the test data directory, then
    instantiates the stores.

    :param passing_config: making store driver passes basic
        configurations.  NOTE(review): this flag is not referenced by
        the body -- presumably kept for signature compatibility with
        related helpers; confirm.
    """
    self.config(enabled_backends={'file1': 'file', 'ceph1': 'rbd'})
    store.register_store_opts(CONF)
    self.config(default_backend='file1', group='glance_store')
    self.config(filesystem_store_datadir=self.test_dir, group='file1')
    store.create_multi_stores(CONF)
def init_app():
    """Load config/logging, initialize stores, and return the paste app."""
    CONF([], project='glance', default_config_files=_get_config_files())
    logging.setup(CONF, "glance")

    if CONF.enabled_backends:
        # Multi-store deployment.
        glance_store.register_store_opts(CONF)
        glance_store.create_multi_stores(CONF)
        glance_store.verify_store()
    else:
        # Legacy single-store deployment.
        glance_store.register_opts(CONF)
        glance_store.create_stores(CONF)
        glance_store.verify_default_store()

    _setup_os_profiler()
    return config.load_paste_app('glance-api')
def _create_multi_stores(self, passing_config=True):
    """Create and configure the known test stores.

    Enables 'file1' (file), 'ceph1' (rbd) and 'readonly_store' (http)
    backends, makes 'file1' the default and points it at the test data
    directory, then instantiates the stores.

    :param passing_config: making store driver passes basic
        configurations.  NOTE(review): this flag is not referenced by
        the body -- presumably kept for signature compatibility with
        related helpers; confirm.
    """
    self.config(enabled_backends={'file1': 'file', 'ceph1': 'rbd',
                                  'readonly_store': 'http'})
    store.register_store_opts(CONF)
    self.config(default_backend='file1', group='glance_store')
    self.config(filesystem_store_datadir=self.test_dir, group='file1')
    store.create_multi_stores(CONF)
def setUp(self, mock_api_session, mock_get_datastore):
    """Establish a clean test environment.

    ``mock_api_session`` and ``mock_get_datastore`` are presumably
    injected by ``mock.patch`` decorators on the class or method --
    confirm against the decorators outside this block.
    """
    super(TestMultiStore, self).setUp()
    enabled_backends = {
        "vmware1": "vmware",
        "vmware2": "vmware"
    }
    self.hash_algo = 'sha256'
    self.conf = self._CONF
    self.conf(args=[])
    self.conf.register_opt(cfg.DictOpt('enabled_backends'))
    self.config(enabled_backends=enabled_backends)
    store.register_store_opts(self.conf)
    self.config(default_backend='vmware1', group='glance_store')

    # set vmware related config options; the two backends differ only
    # in datastore index and image directory.
    self.config(group='vmware1',
                vmware_server_username='******',
                vmware_server_password='******',
                vmware_server_host='127.0.0.1',
                vmware_insecure='True',
                vmware_datastores=['a:b:0'],
                vmware_store_image_dir='/openstack_glance')
    self.config(group='vmware2',
                vmware_server_username='******',
                vmware_server_password='******',
                vmware_server_host='127.0.0.1',
                vmware_insecure='True',
                vmware_datastores=['a:b:1'],
                vmware_store_image_dir='/openstack_glance_1')

    # Ensure stores + locations cleared
    location.SCHEME_TO_CLS_BACKEND_MAP = {}
    store.create_multi_stores(self.conf)
    self.addCleanup(setattr, location, 'SCHEME_TO_CLS_BACKEND_MAP', dict())
    self.addCleanup(self.conf.reset)

    # Small chunk size so multi-chunk uploads are exercised.
    vm_store.Store.CHUNKSIZE = 2
    mock_get_datastore.side_effect = fake_datastore_obj

    self.store = vm_store.Store(self.conf, backend="vmware1")
    self.store.configure()
def _create_multi_stores(self, passing_config=True):
    """Create and configure the known test stores.

    Enables 'fast' (file), 'cheap' (file) and 'readonly_store' (http)
    backends, makes 'fast' the default, and points the two file
    backends at separate test data directories.

    :param passing_config: making store driver passes basic
        configurations.  NOTE(review): this flag is not referenced by
        the body -- presumably kept for signature compatibility with
        related helpers; confirm.
    """
    self.config(enabled_backends={
        'fast': 'file',
        'cheap': 'file',
        'readonly_store': 'http'
    })
    store.register_store_opts(CONF)
    self.config(default_backend='fast', group='glance_store')
    self.config(filesystem_store_datadir=self.test_dir, group='fast')
    self.config(filesystem_store_datadir=self.test_dir2, group='cheap')
    store.create_multi_stores(CONF)
def _test_prefetcher(self, mock_get_db):
    """Exercise the cache prefetcher against a single 'cheap' file store.

    Verifies a queued image is cached only once it is active and has a
    location, and that it stays queued until then.
    """
    self.config(enabled_backends={'cheap': 'file'})
    store.register_store_opts(CONF)
    self.config(filesystem_store_datadir='/tmp', group='cheap')
    store.create_multi_stores(CONF)

    tempf = tempfile.NamedTemporaryFile()
    tempf.write(b'foo')
    # BUG FIX: flush the buffered write so the payload is actually on
    # disk by the time the file store re-reads tempf.name during
    # caching; without this the store can observe an empty file.
    tempf.flush()

    db = unit_test_utils.FakeDB(initialize=False)
    mock_get_db.return_value = db
    ctx = context.RequestContext(is_admin=True, roles=['admin'])
    gateway = glance_gateway.Gateway()
    image_factory = gateway.get_image_factory(ctx,
                                              authorization_layer=False)
    image_repo = gateway.get_repo(ctx, authorization_layer=False)
    fetcher = prefetcher.Prefetcher()

    # Create an image with no values set and queue it
    image = image_factory.new_image()
    image_repo.add(image)
    fetcher.cache.queue_image(image.image_id)

    # Image is not active, so it should fail to cache, but remain queued
    self.assertFalse(fetcher.run())
    self.assertFalse(fetcher.cache.is_cached(image.image_id))
    self.assertTrue(fetcher.cache.is_queued(image.image_id))

    # Set the disk/container format and give it a location
    image.disk_format = 'raw'
    image.container_format = 'bare'
    image.status = 'active'
    loc = {'url': 'file://%s' % tempf.name,
           'metadata': {'store': 'cheap'}}
    with mock.patch('glance.location._check_image_location'):
        # FIXME(danms): Why do I have to do this?
        image.locations = [loc]
        image_repo.save(image)

    # Image is now active and has a location, so it should cache
    self.assertTrue(fetcher.run())
    self.assertTrue(fetcher.cache.is_cached(image.image_id))
    self.assertFalse(fetcher.cache.is_queued(image.image_id))
def setUp(self):
    """Set up one file store plus the reserved staging store."""
    super(TestStagingStoreHousekeeping, self).setUp()
    # The reserved staging store must be supplied both when registering
    # options and when creating the stores; name it once.
    reserved = {'os_glance_staging_store': 'file'}
    self.config(enabled_backends={'store1': 'file'})
    glance_store.register_store_opts(CONF, reserved_stores=reserved)
    self.config(default_backend='store1', group='glance_store')
    self.config(filesystem_store_datadir=self._store_dir('store1'),
                group='store1')
    self.config(filesystem_store_datadir=self._store_dir('staging'),
                group='os_glance_staging_store')
    glance_store.create_multi_stores(CONF, reserved_stores=reserved)

    self.db = unit_test_utils.FakeDB(initialize=False)
    self.cleaner = housekeeping.StagingStoreCleaner(self.db)
    self.context = context.get_admin_context()
def setUp(self):
    """Establish a clean two-backend cinder test environment.

    Configures 'cinder1' and 'cinder2' backends, builds a configured
    'cinder1' store, and prepares fake request contexts carrying a
    minimal cinder ('volumev2') service catalog.
    """
    super(TestMultiCinderStore, self).setUp()
    enabled_backends = {"cinder1": "cinder",
                        "cinder2": "cinder"}
    self.conf = self._CONF
    self.conf(args=[])
    self.conf.register_opt(cfg.DictOpt('enabled_backends'))
    self.config(enabled_backends=enabled_backends)
    store.register_store_opts(self.conf)
    self.config(default_backend='cinder1', group='glance_store')

    # Ensure stores + locations cleared
    location.SCHEME_TO_CLS_BACKEND_MAP = {}
    store.create_multi_stores(self.conf)
    self.addCleanup(setattr, location, 'SCHEME_TO_CLS_BACKEND_MAP', dict())
    self.test_dir = self.useFixture(fixtures.TempDir()).path
    self.addCleanup(self.conf.reset)

    self.store = cinder.Store(self.conf, backend="cinder1")
    self.store.configure()
    self.register_store_backend_schemes(self.store, 'cinder', 'cinder1')
    self.store.READ_CHUNKSIZE = 4096
    self.store.WRITE_CHUNKSIZE = 4096

    # Minimal service catalog exposing a single cinder v2 endpoint.
    fake_sc = [{
        u'endpoints': [{
            u'publicURL': u'http://foo/public_url'
        }],
        u'endpoints_links': [],
        u'name': u'cinder',
        u'type': u'volumev2'
    }]
    self.context = FakeObject(service_catalog=fake_sc,
                              user_id='fake_user',
                              auth_token='fake_token',
                              project_id='fake_project')
    # Admin context whose elevated() hands back admin credentials.
    self.fake_admin_context = mock.MagicMock()
    self.fake_admin_context.elevated.return_value = FakeObject(
        service_catalog=fake_sc,
        user_id='admin_user',
        auth_token='admin_token',
        project_id='admin_project')
def init_app():
    """Initialize the Glance API application for a WSGI server.

    Loads configuration and logging, initializes the store backends
    (multi-store or legacy single-store), and returns the loaded paste
    application.
    """
    config_files = _get_config_files()
    CONF([], project='glance', default_config_files=config_files)
    logging.setup(CONF, "glance")

    if CONF.enabled_backends:
        # Operators must not use the reserved 'os_glance_' prefix for
        # their own backends; fail fast if they do.
        if store_utils.check_reserved_stores(CONF.enabled_backends):
            msg = _("'os_glance_' prefix should not be used in "
                    "enabled_backends config option. It is reserved "
                    "for internal use only.")
            raise RuntimeError(msg)
        glance_store.register_store_opts(CONF,
                                         reserved_stores=RESERVED_STORES)
        glance_store.create_multi_stores(CONF,
                                         reserved_stores=RESERVED_STORES)
        glance_store.verify_store()
    else:
        # Legacy single-store configuration.
        glance_store.register_opts(CONF)
        glance_store.create_stores(CONF)
        glance_store.verify_default_store()

    _setup_os_profiler()
    return config.load_paste_app('glance-api')
def setUp(self): """Establish a clean test environment.""" super(TestMultiStore, self).setUp() enabled_backends = {"ceph1": "rbd", "ceph2": "rbd"} self.conf = self._CONF self.conf(args=[]) self.conf.register_opt(cfg.DictOpt('enabled_backends')) self.config(enabled_backends=enabled_backends) store.register_store_opts(self.conf) self.config(default_backend='ceph1', group='glance_store') # Ensure stores + locations cleared g_location.SCHEME_TO_CLS_BACKEND_MAP = {} with mock.patch.object(rbd_store.Store, '_set_url_prefix'): store.create_multi_stores(self.conf) self.addCleanup(setattr, g_location, 'SCHEME_TO_CLS_BACKEND_MAP', dict()) self.addCleanup(self.conf.reset) rbd_store.rados = MockRados rbd_store.rbd = MockRBD self.store = rbd_store.Store(self.conf, backend="ceph1") self.store.configure() self.store.chunk_size = 2 self.called_commands_actual = [] self.called_commands_expected = [] self.store_specs = { 'pool': 'fake_pool', 'image': 'fake_image', 'snapshot': 'fake_snapshot' } self.location = rbd_store.StoreLocation(self.store_specs, self.conf) # Provide enough data to get more than one chunk iteration. self.data_len = 3 * units.Ki self.data_iter = six.BytesIO(b'*' * self.data_len)
def setUp(self): """Establish a clean test environment.""" super(TestMultiS3Store, self).setUp() enabled_backends = {"s3_region1": "s3", "s3_region2": "s3"} self.hash_algo = 'sha256' self.conf = self._CONF self.conf(args=[]) self.conf.register_opt(cfg.DictOpt('enabled_backends')) self.config(enabled_backends=enabled_backends) store.register_store_opts(self.conf) self.config(default_backend='s3_region1', group='glance_store') # set s3 related config options self.config(group='s3_region1', s3_store_access_key='user', s3_store_secret_key='key', s3_store_host='https://s3-region1.com', s3_store_bucket='glance', s3_store_large_object_size=5, s3_store_large_object_chunk_size=6) self.config(group='s3_region2', s3_store_access_key='user', s3_store_secret_key='key', s3_store_host='http://s3-region2.com', s3_store_bucket='glance', s3_store_large_object_size=5, s3_store_large_object_chunk_size=6) # Ensure stores + locations cleared location.SCHEME_TO_CLS_BACKEND_MAP = {} store.create_multi_stores(self.conf) self.addCleanup(setattr, location, 'SCHEME_TO_CLS_BACKEND_MAP', dict()) self.addCleanup(self.conf.reset) self.store = s3.Store(self.conf, backend="s3_region1") self.store.configure() self.register_store_backend_schemes(self.store, 's3', 's3_region1')
def setUp(self): """Establish a clean test environment.""" super(TestSheepdogMultiStore, self).setUp() enabled_backends = { "sheepdog1": "sheepdog", "sheepdog2": "sheepdog", } self.conf = self._CONF self.conf(args=[]) self.conf.register_opt(cfg.DictOpt('enabled_backends')) self.config(enabled_backends=enabled_backends) store.register_store_opts(self.conf) self.config(default_backend='sheepdog1', group='glance_store') # mock sheepdog commands def _fake_execute(*cmd, **kwargs): pass execute = mock.patch.object(processutils, 'execute').start() execute.side_effect = _fake_execute self.addCleanup(execute.stop) # Ensure stores + locations cleared location.SCHEME_TO_CLS_BACKEND_MAP = {} store.create_multi_stores(self.conf) self.addCleanup(setattr, location, 'SCHEME_TO_CLS_BACKEND_MAP', dict()) self.addCleanup(self.conf.reset) self.store = sheepdog.Store(self.conf, backend='sheepdog1') self.store.configure() self.store_specs = { 'image': '6bd59e6e-c410-11e5-ab67-0a73f1fda51b', 'addr': '127.0.0.1', 'port': 7000 }
def main():
    """Entry point for the glance-scrubber command.

    Parses configuration, initializes the store backends, then either
    restores an image's status, runs as a periodic daemon, or performs
    a single scrub pass.
    """
    # Used on Window, ensuring that a single scrubber can run at a time.
    mutex = None
    mutex_acquired = False
    try:
        if os.name == 'nt':
            # We can't rely on process names on Windows as there may be
            # wrappers with the same name.
            mutex = os_win_utilsfactory.get_mutex(
                name='Global\\glance-scrubber')
            mutex_acquired = mutex.acquire(timeout_ms=0)

        CONF.register_cli_opts(scrubber.scrubber_cmd_cli_opts)
        CONF.register_opts(scrubber.scrubber_cmd_opts)
        config.parse_args()
        logging.setup(CONF, 'glance')
        CONF.import_opt('enabled_backends', 'glance.common.wsgi')

        if CONF.enabled_backends:
            # Multi-store configuration.
            glance_store.register_store_opts(CONF)
            glance_store.create_multi_stores(CONF)
            glance_store.verify_store()
        else:
            # Legacy single-store configuration.
            glance_store.register_opts(CONF)
            glance_store.create_stores(CONF)
            glance_store.verify_default_store()

        # --restore and --daemon are mutually exclusive.
        if CONF.restore and CONF.daemon:
            sys.exit("ERROR: The restore and daemon options should not be set "
                     "together. Please use either of them in one request.")

        app = scrubber.Scrubber(glance_store)

        if CONF.restore:
            # Refuse to restore while another scrubber instance runs.
            if os.name == 'nt':
                scrubber_already_running = not mutex_acquired
            else:
                scrubber_already_running = scrubber_already_running_posix()

            if scrubber_already_running:
                already_running_msg = (
                    "ERROR: glance-scrubber is already running. "
                    "Please ensure that the daemon is stopped.")
                sys.exit(already_running_msg)

            app.revert_image_status(CONF.restore)
        elif CONF.daemon:
            # Periodic scrubbing driven by the wakeup interval.
            server = scrubber.Daemon(CONF.wakeup_time)
            server.start(app)
            server.wait()
        else:
            # One-shot scrub pass.
            app.run()
    except (exception.ImageNotFound, exception.Conflict) as e:
        sys.exit("ERROR: %s" % e)
    except RuntimeError as e:
        sys.exit("ERROR: %s" % e)
    finally:
        # Release the Windows global mutex only if we acquired it.
        if mutex and mutex_acquired:
            mutex.release()
def initialize_multi_store():
    """Register, create, and verify the glance multi-store backends."""
    conf = CONF
    glance_store.register_store_opts(conf)
    glance_store.create_multi_stores(conf)
    glance_store.verify_store()
def initialize_multi_store():
    """Initialize glance multi store backends, including reserved stores."""
    reserved = RESERVED_STORES
    glance_store.register_store_opts(CONF, reserved_stores=reserved)
    glance_store.create_multi_stores(CONF, reserved_stores=reserved)
    glance_store.verify_store()