def __init__(self, server, persistence_config, backend_config,
             ember_config=None, **kwargs):
    """Initialize the Controller servicer.

    Sets up cinderlib with the given persistence configuration, creates
    the storage backend, registers this instance as the gRPC Controller
    servicer and pre-builds the constant response objects.

    :param server: gRPC server to register the servicer on.
    :param persistence_config: cinderlib persistence configuration dict.
    :param backend_config: kwargs passed to ``cinderlib.Backend``.
    :param ember_config: extra config forwarded to ``cinderlib.setup``
                         (the 'disabled' key is stripped first).
    """
    # Work on a copy so the caller's mapping is never mutated.  Tolerate
    # a None config (the declared default) and a missing 'disabled' key
    # instead of raising AttributeError/KeyError.
    cinderlib_extra_config = dict(ember_config or {})
    cinderlib_extra_config.pop('disabled', None)
    cinderlib.setup(persistence_config=persistence_config,
                    **cinderlib_extra_config)
    self.backend = cinderlib.Backend(**backend_config)

    IdentityBase.__init__(self, server, ember_config)
    self.CSI.add_ControllerServicer_to_server(self, server)

    # Constant responses, built once instead of per-request.
    self.DELETE_RESP = self.TYPES.DeleteResp()
    self.CTRL_UNPUBLISH_RESP = self.TYPES.UnpublishResp()

    capab = [self.TYPES.CtrlCapability(rpc=self.TYPES.CtrlRPC(type=rpc))
             for rpc in self.CTRL_CAPABILITIES]
    self.CTRL_CAPABILITIES_RESP = self.TYPES.CtrlCapabilityResp(
        capabilities=capab)

    if len(self.backend.pool_names) > 1:
        # Lazy %-style args: the join only runs if INFO is enabled.
        LOG.info('Available pools: %s', ', '.join(self.backend.pool_names))
def __init__(self, server, persistence_config=None, ember_config=None,
             node_id=None, storage_nw_ip=None, **kwargs):
    """Initialize the Node servicer.

    Optionally configures cinderlib (Node-only deployments may have no
    backend), records the node information, registers the gRPC Node
    servicer and pre-builds the constant response objects.
    """
    # When running as Node only we have to initialize cinderlib telling it
    # not to fail when there's no backend configured.
    if persistence_config:
        setup_cfg = ember_config.copy()
        setup_cfg.pop('disabled')
        setup_cfg['fail_on_missing_backend'] = False
        cinderlib.setup(persistence_config=persistence_config, **setup_cfg)

    IdentityBase.__init__(self, server, ember_config)

    self.node_info = common.NodeInfo.set(node_id, storage_nw_ip)
    self.CSI.add_NodeServicer_to_server(self, server)

    # Constant responses, created once up front.
    self.STAGE_RESP = self.TYPES.StageResp()
    self.UNSTAGE_RESP = self.TYPES.UnstageResp()
    self.NODE_PUBLISH_RESP = self.TYPES.NodePublishResp()
    self.NODE_UNPUBLISH_RESP = self.TYPES.NodeUnpublishResp()

    node_caps = [self.TYPES.NodeCapability(rpc=self.TYPES.NodeRPC(type=cap))
                 for cap in self.NODE_CAPABILITIES]
    self.NODE_CAPABILITIES_RESP = self.TYPES.NodeCapabilityResp(
        capabilities=node_caps)
def _setup(self, storage_data): if not storage_data: return None cinderlib.setup(**storage_data[common.PROVIDER_CONFIG]) backend = cinderlib.Backend(**storage_data[common.BACKEND_CONFIG]) return backend
def setUpClass(cls):
    """Configure cinderlib persistence and instantiate the test backends."""
    cls._replace_oslo_cli_parse()
    config = cls.ensure_config_loaded()

    # Same keyword arguments for both setup calls.
    setup_kwargs = dict(root_helper=cls.ROOT_HELPER,
                        disable_logs=not config['logs'],
                        debug=config['debug'])

    # Use memory_db persistence instead of memory to ensure migrations work
    cinderlib.setup(persistence_config={'storage': 'memory_db'},
                    **setup_kwargs)

    if cls.MEMORY_PERSISTENCE:
        # Now replace it with the memory plugin for the tests to ensure the
        # Cinder driver is compatible with the persistence plugin
        # mechanism, as the DB plugin could hide issues.
        cinderlib.Backend.global_initialization = False
        cinderlib.setup(persistence_config={'storage': 'memory'},
                        **setup_kwargs)

    # Initialize backends
    cls.backends = [cinderlib.Backend(**cfg) for cfg in config['backends']]

    # Lazy load backend's _volumes variable using the volumes property so
    # new volumes are added to this list on successful creation.
    for backend in cls.backends:
        backend.volumes

    # Set current backend, by default is the first
    cls.backend = cls.backends[0]
    cls.size_precision = config['size_precision']
def __init__(self, server, persistence_config, backend_config,
             cinderlib_config=None, default_size=DEFAULT_SIZE, **kwargs):
    """Initialize the Controller servicer.

    :param server: gRPC server to register the servicer on.
    :param persistence_config: cinderlib persistence configuration dict.
    :param backend_config: kwargs passed to ``cinderlib.Backend``.
    :param cinderlib_config: extra kwargs for ``cinderlib.setup``.
    :param default_size: default volume size used by this controller.
    """
    self.default_size = default_size
    # Tolerate the declared None default: ``**None`` would raise TypeError.
    cinderlib.setup(persistence_config=persistence_config,
                    **(cinderlib_config or {}))
    self.backend = cinderlib.Backend(**backend_config)
    Identity.__init__(self, server, cinderlib_config)
    csi.add_ControllerServicer_to_server(self, server)
def __init__(self, server, persistence_config, backend_config,
             ember_config=None, default_size=DEFAULT_SIZE, **kwargs):
    """Initialize the Controller servicer.

    :param server: gRPC server to register the servicer on.
    :param persistence_config: cinderlib persistence configuration dict.
    :param backend_config: kwargs passed to ``cinderlib.Backend``.
    :param ember_config: extra kwargs for ``cinderlib.setup``; its
                         'plugin_name' entry (if any) is popped and handed
                         to the Identity initializer.
    :param default_size: default volume size used by this controller.
    """
    self.default_size = default_size
    # Tolerate the declared None default: ``.pop`` on None would raise
    # AttributeError.  Non-None configs keep the original in-place pop.
    if ember_config is None:
        ember_config = {}
    plugin_name = ember_config.pop('plugin_name', None)
    cinderlib.setup(persistence_config=persistence_config, **ember_config)
    self.backend = cinderlib.Backend(**backend_config)
    Identity.__init__(self, server, ember_config, plugin_name)
    csi.add_ControllerServicer_to_server(self, server)
def load_backend(args):
    """Configure cinderlib with DB persistence and return the backend.

    :param args: parsed CLI arguments providing ``db_url`` and the JSON
                 encoded ``driver`` configuration.
    :returns: the initialized ``cl.Backend``.
    """
    cl.setup(file_locks_path=conf.get('ENGINE_TMP'),
             persistence_config={'storage': 'db',
                                 'connection': args.db_url},
             ssh_hosts_key_file=get_ssh_known_hosts(),
             disable_logs=False)

    # Setup logging here to not have our logger overridden by cinderlib's
    setup_logger(args)

    driver_cfg = json.loads(args.driver)
    return cl.Backend(**driver_cfg)
def __init__(self, server, persistence_config=None, cinderlib_config=None,
             node_id=None, storage_nw_ip=None, **kwargs):
    """Initialize the Node servicer.

    :param server: gRPC server to register the servicer on.
    :param persistence_config: cinderlib persistence configuration dict;
                               when falsy, cinderlib setup is skipped.
    :param cinderlib_config: extra kwargs for ``cinderlib.setup``.
    :param node_id: node identifier; defaults to this host's FQDN.
    :param storage_nw_ip: storage network IP recorded in the node info.
    """
    if persistence_config:
        # Tolerate the declared None default: item assignment on None
        # would raise TypeError.
        if cinderlib_config is None:
            cinderlib_config = {}
        cinderlib_config['fail_on_missing_backend'] = False
        cinderlib.setup(persistence_config=persistence_config,
                        **cinderlib_config)

    Identity.__init__(self, server, cinderlib_config)

    node_id = node_id or socket.getfqdn()
    self.node_id = types.IdResp(node_id=node_id)
    self.node_info = NodeInfo.set(node_id, storage_nw_ip)
    csi.add_NodeServicer_to_server(self, server)
def setUpClass(cls):
    """Snapshot OVO methods and re-run cinderlib setup for the tests."""
    # Save OVO methods that some persistence plugins mess up
    cls.ovo_methods = {}
    registry = cinder_base_ovo.CinderObjectRegistry.obj_classes()
    for ovo_name in registry:
        ovo_cls = getattr(objects, ovo_name)
        saved = {'save': getattr(ovo_cls, 'save', None),
                 'get_by_id': getattr(ovo_cls, 'get_by_id', None)}
        cls.ovo_methods[ovo_name] = saved

    cls.original_impl = volume_cmd.session.IMPL
    # Force setup to run again with the test persistence configuration.
    cinderlib.Backend.global_initialization = False
    cinderlib.setup(persistence_config=cls.PERSISTENCE_CFG)
def setUpClass(cls):
    """Configure cinderlib persistence and instantiate the test backends."""
    cls._replace_oslo_cli_parse()
    config = cls.ensure_config_loaded()

    # Use memory_db persistence instead of memory to ensure migrations work
    cinderlib.setup(persistence_config={'storage': 'memory_db'},
                    root_helper=cls.ROOT_HELPER,
                    disable_logs=not config['logs'])

    # Initialize backends
    cls.backends = [cinderlib.Backend(**cfg) for cfg in config['backends']]

    # Lazy load backend's _volumes variable using the volumes property so
    # new volumes are added to this list on successful creation.
    for backend in cls.backends:
        backend.volumes

    # Set current backend, by default is the first
    cls.backend = cls.backends[0]
    cls.size_precision = config['size_precision']
def setUpClass(cls):
    """Pick the right sudo helper, set up cinderlib and the backends."""
    config = cls.ensure_config_loaded()

    if config['venv_sudo']:
        # NOTE(geguileo): For some drivers need to use a custom sudo script
        # to find virtualenv commands (ie: cinder-rtstool).
        here = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
        cls.root_helper = os.path.join(here, 'virtualenv-sudo.sh')
    else:
        cls.root_helper = 'sudo'

    cinderlib.setup(root_helper=cls.root_helper,
                    disable_logs=not config['logs'])

    # Initialize backends
    cls.backends = [cinderlib.Backend(**cfg) for cfg in config['backends']]

    # Set current backend, by default is the first
    cls.backend = cls.backends[0]
    cls.size_precision = config['size_precision']
def setUpClass(cls):
    """Reset cinderlib's global state and re-run setup for this test class."""
    # Keep a reference to the current DB session implementation so tests
    # can restore it later.
    cls.original_impl = volume_cmd.session.IMPL
    # Clearing the flag forces cinderlib.setup() to run again instead of
    # being a no-op after a previous initialization.
    cinderlib.Backend.global_initialization = False
    cinderlib.setup(persistence_config=cls.PERSISTENCE_CFG)
import cinderlib from cinderlib.tests.unit import utils def _replace_oslo_cli_parse(): original_cli_parser = cfg.ConfigOpts._parse_cli_opts def _parse_cli_opts(self, args): return original_cli_parser(self, []) cfg.ConfigOpts._parse_cli_opts = six.create_unbound_method( _parse_cli_opts, cfg.ConfigOpts) _replace_oslo_cli_parse() cinderlib.setup(persistence_config={'storage': utils.get_mock_persistence()}) class BaseTest(unittest.TestCase): PERSISTENCE_CFG = None def setUp(self): if not self.PERSISTENCE_CFG: cfg = {'storage': utils.get_mock_persistence()} cinderlib.Backend.set_persistence(cfg) self.backend_name = 'fake_backend' self.backend = utils.FakeBackend(volume_backend_name=self.backend_name) self.persistence = self.backend.persistence cinderlib.Backend._volumes_inflight = {} def tearDown(self):
def setUpClass(cls):
    """Run cinderlib setup and cache its persistence plugin and context."""
    cls.original_impl = volume_cmd.session.IMPL
    # We check the entrypoint is working
    cinderlib.setup(persistence_config=cls.PERSISTENCE_CFG)

    backend_cls = cinderlib.Backend
    cls.persistence = backend_cls.persistence
    cls.context = cinderlib.objects.CONTEXT
import logging import functools import sys import cinderlib as cl from eventlet import tpool import grpc import storage_pb2 from storage_pb2 import Response import storage_pb2_grpc from concurrent import futures cl.setup(disable_logs=False) ceph = cl.Backend( volume_backend_name='ceph', volume_driver='cinder.volume.drivers.rbd.RBDDriver', rbd_user='******', rbd_pool='volumes', rbd_ceph_conf='/etc/ceph/ceph.conf', rbd_keyring_conf='/etc/ceph/ceph.client.admin.keyring', ) logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) log = logging.getLogger(__name__) log.info("Finished setting cinderlib backend...") class ServerProxy(tpool.Proxy): @staticmethod def _my_doit(method, *args, **kwargs): # cygrpc.Server methods don't acept proxied completion_queue
def load_backend(args):
    """Configure cinderlib with DB persistence and return the backend.

    :param args: parsed CLI arguments providing ``db_url`` and the JSON
                 encoded ``driver`` configuration.
    :returns: the initialized ``cl.Backend``.
    """
    cl.setup(persistence_config={'storage': 'db',
                                 'connection': args.db_url})
    driver_cfg = json.loads(args.driver)
    return cl.Backend(**driver_cfg)