def _init_driver(self, shard_id):
    """Given a shard name, returns a storage driver.

    :param shard_id: The name of a shard.
    :type shard_id: six.text_type
    :returns: a storage driver
    :rtype: marconi.queues.storage.base.DataDriver
    """
    shard = self._shards_ctrl.get(shard_id, detailed=True)

    # NOTE(cpp-cabrera): make it *very* clear to data storage
    # drivers that we are operating in sharding mode.
    general_dict_opts = {"dynamic": True}
    general_opts = common_utils.dict_to_conf(general_dict_opts)

    # NOTE(cpp-cabrera): parse general opts: 'drivers'
    uri = shard["uri"]
    storage_type = six.moves.urllib_parse.urlparse(uri).scheme
    driver_dict_opts = {"storage": storage_type}
    driver_opts = common_utils.dict_to_conf(driver_dict_opts)

    # NOTE(cpp-cabrera): parse storage-specific opts:
    # 'drivers:storage:{type}'
    # Copy the options mapping before adding 'uri' so the record
    # returned by the shards controller is not mutated as a side
    # effect of building this driver's configuration.
    storage_dict_opts = dict(shard["options"])
    storage_dict_opts["uri"] = uri
    storage_opts = common_utils.dict_to_conf(storage_dict_opts)
    storage_group = u"drivers:storage:%s" % storage_type

    # NOTE(cpp-cabrera): register those options!
    conf = cfg.ConfigOpts()
    conf.register_opts(general_opts)
    conf.register_opts(driver_opts, group=u"drivers")
    conf.register_opts(storage_opts, group=storage_group)
    return utils.load_storage_driver(conf, self._cache)
def _init_shard(self, shard_id):
    """Return a storage driver configured for the given shard."""
    # TODO(kgriffs): SHARDING - Read options from catalog backend
    shard_conf = cfg.ConfigOpts()
    shard_conf.register_opts(
        [cfg.StrOpt("storage", default="sqlite")],
        group="queues:drivers")
    return utils.load_storage_driver(shard_conf)
def _init_driver(self, shard_id):
    """Given a shard name, returns a storage driver.

    :param shard_id: The name of a shard.
    :type shard_id: six.text_type
    :returns: a storage driver
    :rtype: marconi.queues.storage.base.DataDriver
    """
    # Look up the shard record, then build a driver from its
    # URI and per-shard options.
    record = self._shards_ctrl.get(shard_id, detailed=True)
    return utils.load_storage_driver(
        utils.dynamic_conf(record['uri'], record['options']),
        self._cache)
def _init_driver(self, pool_id):
    """Given a pool name, returns a storage driver.

    :param pool_id: The name of a pool.
    :type pool_id: six.text_type
    :returns: a storage driver
    :rtype: marconi.queues.storage.base.DataDriverBase
    """
    entry = self._pools_ctrl.get(pool_id, detailed=True)
    pool_conf = utils.dynamic_conf(entry['uri'], entry['options'])
    return utils.load_storage_driver(pool_conf, self._cache)
def _init_driver(self, shard_id):
    """Given a shard name, returns a storage driver.

    :param shard_id: The name of a shard.
    :type shard_id: six.text_type
    :returns: a storage driver
    :rtype: marconi.queues.storage.base.DataDriver
    """
    detail = self._shards_ctrl.get(shard_id, detailed=True)
    driver_conf = utils.dynamic_conf(detail['uri'], detail['options'])
    return utils.load_storage_driver(driver_conf, self._cache)
def storage(self):
    """Instantiate the data storage driver, wrapped in a pipeline."""
    LOG.debug(_(u'Loading storage driver'))

    if self.conf.sharding:
        # Sharding routes each queue to its own backend driver.
        LOG.debug(_(u'Storage sharding enabled'))
        driver = sharding.DataDriver(self.conf)
    else:
        driver = storage_utils.load_storage_driver(self.conf)

    LOG.debug(_(u'Loading storage pipeline'))
    return pipeline.DataDriver(self.conf, driver)
def storage(self):
    """Build the storage driver and wrap it in the pipeline driver."""
    LOG.debug(_(u'Loading storage driver'))

    sharded = self.conf.sharding
    if sharded:
        LOG.debug(_(u'Storage sharding enabled'))

    storage_driver = (sharding.DataDriver(self.conf) if sharded
                      else storage_utils.load_storage_driver(self.conf))

    LOG.debug(_(u'Loading storage pipeline'))
    return pipeline.DataDriver(self.conf, storage_driver)
def _init_shard(self, shard_id):
    """Return a storage driver for the given shard."""
    # TODO(kgriffs): SHARDING - Read options from catalog backend
    conf = cfg.ConfigOpts()
    conf.register_opts([cfg.BoolOpt('admin_mode', default=False)])
    conf.register_opts([cfg.StrOpt('storage', default='sqlite')],
                       group='queues:drivers')
    return utils.load_storage_driver(conf)
def storage(self):
    """Load the data storage driver and wrap it in a pipeline."""
    LOG.debug(u'Loading storage driver')

    if self.conf.pooling:
        # Pooling fans queues out across multiple backend drivers.
        LOG.debug(u'Storage pooling enabled')
        driver = pooling.DataDriver(self.conf, self.cache,
                                    self.control)
    else:
        driver = storage_utils.load_storage_driver(self.conf,
                                                   self.cache)

    LOG.debug(u'Loading storage pipeline')
    return pipeline.DataDriver(self.conf, driver)
def storage(self):
    """Construct the (possibly pooled) data driver behind a pipeline."""
    LOG.debug(u'Loading storage driver')

    pooled = self.conf.pooling
    if pooled:
        LOG.debug(u'Storage pooling enabled')

    storage_driver = (
        pooling.DataDriver(self.conf, self.cache, self.control)
        if pooled
        else storage_utils.load_storage_driver(self.conf, self.cache))

    LOG.debug(u'Loading storage pipeline')
    return pipeline.DataDriver(self.conf, storage_driver)
def _init_shard(self, shard_id):
    """Construct a storage driver for the named shard."""
    # TODO(kgriffs): SHARDING - Read options from catalog backend
    general_opts = [cfg.BoolOpt('admin_mode', default=False)]
    driver_opts = [cfg.StrOpt('storage', default='sqlite')]

    conf = cfg.ConfigOpts()
    conf.register_opts(general_opts)
    conf.register_opts(driver_opts, group='queues:drivers')
    return utils.load_storage_driver(conf)
def setUp(self):
    """Wire up a pooled data driver backed by two fake pools."""
    super(PoolQueuesTest, self).setUp()

    test_conf = self.load_conf('wsgi_mongodb_pooled.conf')
    test_conf.register_opts([cfg.StrOpt('storage')], group='drivers')

    cache = oslo_cache.get_cache()
    control = utils.load_storage_driver(test_conf, cache,
                                        control_mode=True)
    self.pools_ctrl = control.pools_controller
    self.driver = pooling.DataDriver(test_conf, cache, control)
    self.controller = self.driver.queue_controller

    # fake two pools
    for _ in six.moves.xrange(2):
        self.pools_ctrl.create(str(uuid.uuid1()), 100,
                               'sqlite://:memory:')
def setUp(self):
    """Wire up a sharded data driver backed by two fake shards."""
    super(ShardQueuesTest, self).setUp()
    conf = self.load_conf('wsgi_mongodb_sharded.conf')
    conf.register_opts([cfg.StrOpt('storage')], group='drivers')

    cache = oslo_cache.get_cache()
    control = utils.load_storage_driver(conf, cache,
                                        control_mode=True)
    self.shards_ctrl = control.shards_controller
    self.driver = sharding.DataDriver(conf, cache, control)
    self.controller = self.driver.queue_controller

    # fake two shards
    # NOTE: use six.moves.xrange (as the rest of the file does) rather
    # than the bare py2-only builtin `xrange`, which fails on Python 3.
    for _ in six.moves.xrange(2):
        self.shards_ctrl.create(str(uuid.uuid1()), 100,
                                'sqlite://memory')
def setUp(self):
    """Register a shard and one catalogue entry, then build a Catalog."""
    super(ShardCatalogTest, self).setUp()

    self.conf.register_opts([cfg.StrOpt("storage")], group="drivers")
    cache = oslo_cache.get_cache()
    control = utils.load_storage_driver(self.conf, cache,
                                        control_mode=True)
    self.catalogue_ctrl = control.catalogue_controller
    self.shards_ctrl = control.shards_controller

    # NOTE(cpp-cabrera): populate catalogue
    self.shard = str(uuid.uuid1())
    self.queue = str(uuid.uuid1())
    self.project = str(uuid.uuid1())

    self.shards_ctrl.create(self.shard, 100, "sqlite://:memory:")
    self.catalogue_ctrl.insert(self.project, self.queue, self.shard)

    self.catalog = sharding.Catalog(self.conf, cache, control)
def setUp(self):
    """Seed the catalogue with one project/queue -> shard mapping."""
    super(ShardCatalogTest, self).setUp()

    self.conf.register_opts([cfg.StrOpt('storage')], group='drivers')
    cache = oslo_cache.get_cache()
    control = utils.load_storage_driver(self.conf, cache,
                                        control_mode=True)
    self.catalogue_ctrl = control.catalogue_controller
    self.shards_ctrl = control.shards_controller

    # NOTE(cpp-cabrera): populate catalogue
    self.shard, self.queue, self.project = (
        str(uuid.uuid1()), str(uuid.uuid1()), str(uuid.uuid1()))
    self.shards_ctrl.create(self.shard, 100, 'sqlite://memory')
    self.catalogue_ctrl.insert(self.project, self.queue, self.shard)
    self.catalog = sharding.Catalog(self.conf, cache, control)
def control(self):
    """Load the control-plane storage driver."""
    LOG.debug(u'Loading storage control driver')
    return storage_utils.load_storage_driver(
        self.conf, self.cache, control_mode=True)
def control(self):
    """Return the storage driver loaded in control mode."""
    LOG.debug(_(u'Loading storage control driver'))
    driver = storage_utils.load_storage_driver(self.conf,
                                               self.cache,
                                               control_mode=True)
    return driver