def main():
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    cfg.CONF.register_cli_opts(cli_opts)
    service.prepare_service(sys.argv)
    solum.TLS.trace = trace_data.TraceData()

    LOG.info(_('Starting server in PID %s') % os.getpid())
    LOG.debug("Configuration:")
    logging.setup(cfg.CONF, 'solum')

    cfg.CONF.import_opt('topic', 'solum.worker.config', group='worker')
    cfg.CONF.import_opt('host', 'solum.worker.config', group='worker')
    cfg.CONF.import_opt('handler', 'solum.worker.config', group='worker')

    handlers = {
        'noop': noop_handler.Handler,
        'default': default_handler.Handler,
        'shell': shell_handler.Handler,
    }

    endpoints = [
        handlers[cfg.CONF.worker.handler](),
    ]

    server = rpc_service.Service(cfg.CONF.worker.topic,
                                 cfg.CONF.worker.host, endpoints)
    server.serve()

def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.set_defaults(
        default_log_levels=logging.get_default_log_levels() +
        _EXTRA_DEFAULT_LOG_LEVELS)
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    global LOG
    LOG = logging.getLogger(__name__)

    if CONF.backup_workers > 1:
        LOG.info('Backup running with %s processes.', CONF.backup_workers)
        launcher = service.get_launcher()

        for i in range(CONF.backup_workers):
            _launch_backup_process(launcher, i)

        launcher.wait()
    else:
        LOG.info('Backup running in single process mode.')
        server = service.Service.create(binary='cinder-backup',
                                        coordination=True,
                                        process_number=1)
        service.serve(server)
        service.wait()

def main(): objects.register_all() gmr_opts.set_defaults(CONF) CONF(sys.argv[1:], project='cinder', version=version.version_string()) logging.setup(CONF, "cinder") python_logging.captureWarnings(True) priv_context.init(root_helper=shlex.split(utils.get_root_helper())) utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) global LOG LOG = logging.getLogger(__name__) if not CONF.enabled_backends: LOG.error('Configuration for cinder-volume does not specify ' '"enabled_backends". Using DEFAULT section to configure ' 'drivers is not supported since Ocata.') sys.exit(1) if os.name == 'nt': # We cannot use oslo.service to spawn multiple services on Windows. # It relies on forking, which is not available on Windows. # Furthermore, service objects are unmarshallable objects that are # passed to subprocesses. _launch_services_win32() else: _launch_services_posix()
def test_init_known_contexts(self):
    self.assertEqual(
        testctx.context.helper_command('/sock')[:2],
        ['sudo', 'privsep-helper'])
    priv_context.init(root_helper=['sudo', 'rootwrap'])
    self.assertEqual(
        testctx.context.helper_command('/sock')[:3],
        ['sudo', 'rootwrap', 'privsep-helper'])

def setUp(self):
    super(TestDaemon, self).setUp()

    venv_path = os.environ['VIRTUAL_ENV']
    self.cfg_fixture = self.useFixture(config_fixture.Config())
    self.cfg_fixture.config(
        group='privsep',
        helper_command='sudo -E %s/bin/privsep-helper' % venv_path)
    priv_context.init()

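# Note: the fixture above overrides oslo.privsep's helper_command option.
# Outside of tests the same setting usually lives in the service's
# configuration file; a minimal sketch, assuming a context that keeps the
# default 'privsep' config section (the venv path is hypothetical):
#
#     [privsep]
#     helper_command = sudo -E /opt/venv/bin/privsep-helper
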
def main():
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    zun_service.prepare_service(sys.argv)
    config.parse_args(sys.argv)

    # Initialize o.vo registry.
    os_vif.initialize()
    service.CNIDaemonServiceManager().run()

def main():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts(CLI_OPTS)
    service.prepare_service(conf=conf)
    priv_context.init(root_helper=shlex.split(utils._get_root_helper()))
    sm = cotyledon.ServiceManager()
    sm.add(create_polling_service, args=(conf,))
    oslo_config_glue.setup(sm, conf)
    sm.run()

def main(): objects.register_all() gmr_opts.set_defaults(CONF) CONF(sys.argv[1:], project='cinder', version=version.version_string()) logging.setup(CONF, "cinder") python_logging.captureWarnings(True) priv_context.init(root_helper=shlex.split(utils.get_root_helper())) utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) launcher = service.get_launcher() LOG = logging.getLogger(__name__) service_started = False if CONF.enabled_backends: for backend in filter(None, CONF.enabled_backends): CONF.register_opt(host_opt, group=backend) backend_host = getattr(CONF, backend).backend_host host = "%s@%s" % (backend_host or CONF.host, backend) # We also want to set cluster to None on empty strings, and we # ignore leading and trailing spaces. cluster = CONF.cluster and CONF.cluster.strip() cluster = (cluster or None) and '%s@%s' % (cluster, backend) try: server = service.Service.create(host=host, service_name=backend, binary='cinder-volume', coordination=True, cluster=cluster) except Exception: msg = _('Volume service %s failed to start.') % host LOG.exception(msg) else: # Dispose of the whole DB connection pool here before # starting another process. Otherwise we run into cases where # child processes share DB connections which results in errors. session.dispose_engine() launcher.launch_service(server) service_started = True else: LOG.warning(_LW('Configuration for cinder-volume does not specify ' '"enabled_backends", using DEFAULT as backend. ' 'Support for DEFAULT section to configure drivers ' 'will be removed in the next release.')) server = service.Service.create(binary='cinder-volume', coordination=True, cluster=CONF.cluster) launcher.launch_service(server) service_started = True if not service_started: msg = _('No volume service(s) started successfully, terminating.') LOG.error(msg) sys.exit(1) launcher.wait()
def main():
    # Parse config file and command line options, then start logging
    cyborg_service.prepare_service(sys.argv)
    priv_context.init(root_helper=shlex.split('sudo'))

    mgr = cyborg_service.RPCService('cyborg.agent.manager',
                                    'AgentManager',
                                    constants.AGENT_TOPIC)
    launcher = service.launch(CONF, mgr, restart_method='mutate')
    launcher.wait()

def main(): objects.register_all() gmr_opts.set_defaults(CONF) CONF(sys.argv[1:], project='cinder', version=version.version_string()) logging.setup(CONF, "cinder") python_logging.captureWarnings(True) priv_context.init(root_helper=shlex.split(utils.get_root_helper())) utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) server = service.Service.create(binary='cinder-backup') service.serve(server) service.wait()
def main():
    sm = cotyledon.ServiceManager()

    # On Windows, we can only initialize conf objects in the subprocess.
    # As a consequence, we can't use oslo_config_glue.setup() on Windows,
    # because cotyledon.ServiceManager objects are not picklable.
    if os.name == 'nt':
        sm.add(create_polling_service)
    else:
        conf = _prepare_config()
        priv_context.init(root_helper=shlex.split(utils._get_root_helper()))
        oslo_config_glue.setup(sm, conf)
        sm.add(create_polling_service, args=(conf,))

    sm.run()

def main():
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    zun_service.prepare_service(sys.argv)

    LOG.info('Starting server in PID %s', os.getpid())
    CONF.log_opt_values(LOG, logging.DEBUG)

    CONF.import_opt('topic', 'zun.conf.compute', group='compute')

    from zun.compute import manager as compute_manager
    endpoints = [
        compute_manager.Manager(),
    ]

    server = rpc_service.Service.create(CONF.compute.topic, CONF.host,
                                        endpoints, binary='zun-compute')
    launcher = service.launch(CONF, server, restart_method='mutate')
    launcher.wait()

def main():
    config.parse_args(sys.argv)
    logging.setup(CONF, 'nova')
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    objects.register_all()
    # Ensure os-vif objects are registered and plugins loaded
    os_vif.initialize()

    gmr.TextGuruMeditation.setup_autorun(version)

    cmd_common.block_db_access('nova-compute')
    objects_base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI()

    server = service.Service.create(binary='nova-compute',
                                    topic=compute_rpcapi.RPC_TOPIC)
    service.serve(server)
    service.wait()

def main():
    config.parse_args(sys.argv)
    logging.setup(CONF, 'nova')
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    # Ensure os-vif objects are registered and plugins loaded
    os_vif.initialize()

    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)

    cmd_common.block_db_access('nova-compute')
    objects_base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI()
    objects.Service.enable_min_version_cache()

    server = service.Service.create(binary='nova-compute',
                                    topic=compute_rpcapi.RPC_TOPIC)
    service.serve(server)
    service.wait()

def main():
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    zun_service.prepare_service(sys.argv)

    LOG.info('Starting server in PID %s', os.getpid())
    CONF.log_opt_values(LOG, logging.DEBUG)

    CONF.import_opt('topic', 'zun.conf.compute', group='compute')

    endpoints = [
        compute_manager.Manager(),
    ]

    server = rpc_service.Service.create(CONF.compute.topic, CONF.host,
                                        endpoints, binary='zun-compute')
    launcher = service.launch(CONF, server)
    launcher.wait()

def main(): config.parse_args(sys.argv) logging.setup(CONF, "nova") priv_context.init(root_helper=shlex.split(utils.get_root_helper())) utils.monkey_patch() objects.register_all() gmr.TextGuruMeditation.setup_autorun(version) if not CONF.conductor.use_local: cmd_common.block_db_access("nova-compute") objects_base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI() else: LOG.warning(_LW("Conductor local mode is deprecated and will " "be removed in a subsequent release")) server = service.Service.create( binary="nova-compute", topic=CONF.compute_topic, db_allowed=CONF.conductor.use_local ) service.serve(server) service.wait()
def main():
    config.parse_args(sys.argv)
    logging.setup(CONF, 'nova')
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    objects.register_all()

    gmr.TextGuruMeditation.setup_autorun(version)

    if not CONF.conductor.use_local:
        block_db_access()
        objects_base.NovaObject.indirection_api = \
            conductor_rpcapi.ConductorAPI()
    else:
        LOG.warning(_LW('Conductor local mode is deprecated and will '
                        'be removed in a subsequent release'))

    server = service.Service.create(binary='nova-compute',
                                    topic=CONF.compute_topic,
                                    db_allowed=CONF.conductor.use_local)
    service.serve(server)
    service.wait()

def init(root_helper='sudo'):
    global ROOT_HELPER
    ROOT_HELPER = root_helper
    priv_context.init(root_helper=[root_helper])

    existing_bgcp = connector.get_connector_properties
    existing_bcp = connector.InitiatorConnector.factory

    def my_bgcp(*args, **kwargs):
        if len(args):
            args = list(args)
            args[0] = ROOT_HELPER
        else:
            kwargs['root_helper'] = ROOT_HELPER
        kwargs['execute'] = _execute
        return existing_bgcp(*args, **kwargs)

    def my_bgc(protocol, *args, **kwargs):
        if len(args):
            # args is a tuple and we cannot do assignments
            args = list(args)
            args[0] = ROOT_HELPER
        else:
            kwargs['root_helper'] = ROOT_HELPER
        kwargs['execute'] = _execute

        # OS-Brick's implementation for RBD is not good enough for us
        if protocol == 'rbd':
            factory = RBDConnector
        else:
            factory = functools.partial(existing_bcp, protocol)

        return factory(*args, **kwargs)

    connector.get_connector_properties = my_bgcp
    connector.InitiatorConnector.factory = staticmethod(my_bgc)
    if hasattr(rootwrap, 'unlink_root'):
        rootwrap.unlink_root = unlink_root

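# A hedged usage sketch for the wrappers installed by init() above; the
# 'iscsi' protocol and the keyword argument are illustrative, not taken
# from the snippet. Whatever root helper callers pass positionally is
# overwritten with ROOT_HELPER, and the local _execute is injected.
init('sudo')
conn = connector.InitiatorConnector.factory('iscsi', None,
                                            use_multipath=False)
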
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.set_defaults(
        default_log_levels=logging.get_default_log_levels() +
        _EXTRA_DEFAULT_LOG_LEVELS)
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    global LOG
    LOG = logging.getLogger(__name__)

    semaphore = utils.semaphore_factory(CONF.backup_max_operations,
                                        CONF.backup_workers)

    LOG.info('Backup running with %s processes.', CONF.backup_workers)
    launcher = service.get_launcher()

    for i in range(1, CONF.backup_workers + 1):
        _launch_backup_process(launcher, i, semaphore)

    launcher.wait()

def _set_priv_helper(cls, root_helper):
    # If we are using a virtual environment then the rootwrap config files
    # should be within the environment and not under /etc/cinder/
    venv = os.environ.get('VIRTUAL_ENV')
    if (venv and not cfg.CONF.rootwrap_config.startswith(venv)
            and not os.path.exists(cfg.CONF.rootwrap_config)):
        # We need to remove the absolute path (initial '/') to generate
        # the config path under the virtualenv for the join to work.
        wrap_path = cfg.CONF.rootwrap_config[1:]
        venv_wrap_file = os.path.join(venv, wrap_path)
        venv_wrap_dir = os.path.dirname(venv_wrap_file)

        # In virtual environments our rootwrap config file is no longer
        # '/etc/cinder/rootwrap.conf'. We have 2 possible roots: either
        # the virtualenv's directory or, if we have installed cinder as
        # editable, the directory where our sources are.

        # For editable installs we need to copy the files into the
        # virtualenv if we haven't copied them before.
        if not utils.__file__.startswith(venv):
            # If we haven't copied the files yet
            if not os.path.exists(venv_wrap_file):
                editable_link = glob.glob(
                    os.path.join(
                        venv, 'lib/python*/site-packages/cinder.egg-link'))
                with open(editable_link[0], 'r') as f:
                    cinder_source_path = f.read().split('\n')[0]
                cinder_source_etc = os.path.join(cinder_source_path,
                                                 'etc/cinder')
                shutil.copytree(cinder_source_etc, venv_wrap_dir)

        # For venvs we need to update the configured filters_path and
        # exec_dirs
        parser = configparser.ConfigParser()
        parser.read(venv_wrap_file)
        # Change contents if we haven't done it already
        if not parser['DEFAULT']['filters_path'].startswith(venv_wrap_dir):
            parser['DEFAULT']['filters_path'] = os.path.join(
                venv_wrap_dir, 'rootwrap.d')
            parser['DEFAULT']['exec_dirs'] = (
                os.path.join(venv, 'bin,') +
                parser['DEFAULT']['exec_dirs'])
            with open(venv_wrap_file, 'w') as f:
                parser.write(f)

        # Don't use set_override because it doesn't work as it should
        cfg.CONF.rootwrap_config = venv_wrap_file

    # The default root helper in both Cinder and privsep is sudo, so
    # there is nothing to do in those cases.
    if root_helper != 'sudo':
        # Get the current helper (usually 'sudo cinder-rootwrap
        # <CONF.rootwrap_config>') and replace the sudo part
        original_helper = utils.get_root_helper()

        # If we haven't already set the helper
        if root_helper not in original_helper:
            new_helper = original_helper.replace('sudo', root_helper)
            utils.get_root_helper = lambda: new_helper

            # Initialize privsep's context to not use 'sudo'
            priv_context.init(root_helper=[root_helper])

    # When using privsep from the system we need to replace the
    # privsep-helper with our own to use the virtual env libraries.
    if venv and not priv_context.__file__.startswith(venv):
        # Use importlib.resources to support PEP 302-based import hooks.
        # importlib.resources was added in 3.7, but 'files' only in 3.9
        # and namespace package support only in 3.10, so fall back to the
        # importlib_resources backport on older interpreters.
        import sys
        if sys.version_info[:2] > (3, 10):
            from importlib.resources import files
        else:
            from importlib_resources import files

        privhelper = files('cinderlib.bin').joinpath('venv-privsep-helper')
        cmd = f'{root_helper} {privhelper}'

        # Change the default of the option instead of the value of the
        # different contexts
        for opt in priv_context.OPTS:
            if opt.name == 'helper_command':
                opt.default = cmd
                break

    # Don't use server/client mode when running as root
    client_mode = not cls.im_root
    cinder.privsep.sys_admin_pctxt.set_client_mode(client_mode)
    os_brick.privileged.default.set_client_mode(client_mode)

def _open_cinder_volume(self, client, volume, mode):
    attach_mode = 'rw' if mode == 'wb' else 'ro'
    device = None
    root_helper = get_root_helper(backend=self.backend_group)
    priv_context.init(root_helper=shlex.split(root_helper))
    host = socket.gethostname()
    properties = connector.get_connector_properties(root_helper, host,
                                                    False, False)

    try:
        volume.reserve(volume)
    except cinder_exception.ClientException as e:
        msg = (_('Failed to reserve volume %(volume_id)s: %(error)s')
               % {'volume_id': volume.id, 'error': e})
        LOG.error(msg)
        raise exceptions.BackendException(msg)

    try:
        connection_info = volume.initialize_connection(volume, properties)
        conn = connector.InitiatorConnector.factory(
            connection_info['driver_volume_type'], root_helper,
            conn=connection_info)
        device = conn.connect_volume(connection_info['data'])
        volume.attach(None, 'glance_store', attach_mode, host_name=host)
        volume = self._wait_volume_status(volume, 'attaching', 'in-use')
        if (connection_info['driver_volume_type'] == 'rbd' and
                not conn.do_local_attach):
            yield device['path']
        else:
            with temporary_chown(device['path'],
                                 backend=self.backend_group), \
                    open(device['path'], mode) as f:
                yield f
    except Exception:
        LOG.exception(_LE('Exception while accessing to cinder volume '
                          '%(volume_id)s.'), {'volume_id': volume.id})
        raise
    finally:
        if volume.status == 'in-use':
            volume.begin_detaching(volume)
        elif volume.status == 'attaching':
            volume.unreserve(volume)

        if device:
            try:
                conn.disconnect_volume(connection_info['data'], device)
            except Exception:
                LOG.exception(_LE('Failed to disconnect volume '
                                  '%(volume_id)s.'),
                              {'volume_id': volume.id})

        try:
            volume.terminate_connection(volume, properties)
        except Exception:
            LOG.exception(_LE('Failed to terminate connection of volume '
                              '%(volume_id)s.'), {'volume_id': volume.id})

        try:
            client.volumes.detach(volume)
        except Exception:
            LOG.exception(_LE('Failed to detach volume %(volume_id)s.'),
                          {'volume_id': volume.id})

def _open_cinder_volume(self, client, volume, mode):
    attach_mode = 'rw' if mode == 'wb' else 'ro'
    device = None
    root_helper = self.get_root_helper()
    priv_context.init(root_helper=shlex.split(root_helper))
    host = socket.gethostname()
    use_multipath = self.store_conf.cinder_use_multipath
    enforce_multipath = self.store_conf.cinder_enforce_multipath
    mount_point_base = self.store_conf.cinder_mount_point_base
    properties = connector.get_connector_properties(
        root_helper, host, use_multipath, enforce_multipath)

    try:
        volume.reserve(volume)
    except cinder_exception.ClientException as e:
        msg = (_('Failed to reserve volume %(volume_id)s: %(error)s')
               % {'volume_id': volume.id, 'error': e})
        LOG.error(msg)
        raise exceptions.BackendException(msg)

    try:
        connection_info = volume.initialize_connection(volume, properties)
        conn = connector.InitiatorConnector.factory(
            connection_info['driver_volume_type'], root_helper,
            conn=connection_info)
        if connection_info['driver_volume_type'] == 'nfs':
            if volume.encrypted:
                volume.unreserve(volume)
                volume.delete()
                msg = (_('Encrypted volume creation for cinder nfs is not '
                         'supported from glance_store. Failed to create '
                         'volume %(volume_id)s')
                       % {'volume_id': volume.id})
                LOG.error(msg)
                raise exceptions.BackendException(msg)

            @utils.synchronized(connection_info['data']['export'])
            def connect_volume_nfs():
                data = connection_info['data']
                export = data['export']
                vol_name = data['name']
                mountpoint = self._get_mount_path(
                    export, os.path.join(mount_point_base, 'nfs'))
                options = data['options']
                self.mount.mount('nfs', export, vol_name, mountpoint,
                                 host, root_helper, options)
                return {'path': os.path.join(mountpoint, vol_name)}

            device = connect_volume_nfs()
        else:
            device = conn.connect_volume(connection_info['data'])
        volume.attach(None, 'glance_store', attach_mode, host_name=host)
        volume = self._wait_volume_status(volume, 'attaching', 'in-use')
        if (connection_info['driver_volume_type'] == 'rbd' and
                not conn.do_local_attach):
            yield device['path']
        else:
            with self.temporary_chown(device['path']), \
                    open(device['path'], mode) as f:
                yield f
    except Exception:
        LOG.exception(_LE('Exception while accessing to cinder volume '
                          '%(volume_id)s.'), {'volume_id': volume.id})
        raise
    finally:
        if volume.status == 'in-use':
            volume.begin_detaching(volume)
        elif volume.status == 'attaching':
            volume.unreserve(volume)

        if device:
            try:
                if connection_info['driver_volume_type'] == 'nfs':
                    @utils.synchronized(connection_info['data']['export'])
                    def disconnect_volume_nfs():
                        path, vol_name = device['path'].rsplit('/', 1)
                        self.mount.umount(vol_name, path, host,
                                          root_helper)

                    disconnect_volume_nfs()
                else:
                    conn.disconnect_volume(connection_info['data'],
                                           device)
            except Exception:
                LOG.exception(_LE('Failed to disconnect volume '
                                  '%(volume_id)s.'),
                              {'volume_id': volume.id})

        try:
            volume.terminate_connection(volume, properties)
        except Exception:
            LOG.exception(_LE('Failed to terminate connection of volume '
                              '%(volume_id)s.'), {'volume_id': volume.id})

        try:
            client.volumes.detach(volume)
        except Exception:
            LOG.exception(_LE('Failed to detach volume %(volume_id)s.'),
                          {'volume_id': volume.id})

def setup_privsep():
    priv_context.init(root_helper=shlex.split(get_root_helper(cfg.CONF)))

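# setup_privsep() above only controls how the privsep daemon is launched;
# the privileged calls themselves go through a PrivContext. A minimal
# sketch of that pattern using the public oslo.privsep API ('myproj', the
# config section, and the capability set are illustrative, not taken from
# any of the projects in this listing):
from oslo_privsep import capabilities
from oslo_privsep import priv_context

default = priv_context.PrivContext(
    'myproj',
    cfg_section='myproj_privsep',
    capabilities=[capabilities.CAP_SYS_ADMIN],
)


@default.entrypoint
def privileged_call(path):
    # The body runs inside the privsep daemon, launched via the
    # root_helper configured by priv_context.init(), with the
    # capabilities declared above.
    ...
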
def _open_cinder_volume(self, client, volume, mode):
    attach_mode = 'rw' if mode == 'wb' else 'ro'
    device = None
    root_helper = get_root_helper()
    priv_context.init(root_helper=shlex.split(root_helper))
    host = socket.gethostname()
    properties = connector.get_connector_properties(root_helper, host,
                                                    False, False)

    try:
        volume.reserve(volume)
    except cinder_exception.ClientException as e:
        msg = (_('Failed to reserve volume %(volume_id)s: %(error)s')
               % {'volume_id': volume.id, 'error': e})
        LOG.error(msg)
        raise exceptions.BackendException(msg)

    try:
        connection_info = volume.initialize_connection(volume, properties)
        conn = connector.InitiatorConnector.factory(
            connection_info['driver_volume_type'], root_helper,
            conn=connection_info)
        device = conn.connect_volume(connection_info['data'])
        volume.attach(None, None, attach_mode, host_name=host)
        volume = self._wait_volume_status(volume, 'attaching', 'in-use')
        if (connection_info['driver_volume_type'] == 'rbd' and
                not conn.do_local_attach):
            yield device['path']
        else:
            with temporary_chown(device['path']), \
                    open(device['path'], mode) as f:
                yield f
    except Exception:
        LOG.exception(_LE('Exception while accessing to cinder volume '
                          '%(volume_id)s.'), {'volume_id': volume.id})
        raise
    finally:
        if volume.status == 'in-use':
            volume.begin_detaching(volume)
        elif volume.status == 'attaching':
            volume.unreserve(volume)

        if device:
            try:
                conn.disconnect_volume(connection_info['data'], device)
            except Exception:
                LOG.exception(_LE('Failed to disconnect volume '
                                  '%(volume_id)s.'),
                              {'volume_id': volume.id})

        try:
            volume.terminate_connection(volume, properties)
        except Exception:
            LOG.exception(_LE('Failed to terminate connection of volume '
                              '%(volume_id)s.'), {'volume_id': volume.id})

        try:
            client.volumes.detach(volume)
        except Exception:
            LOG.exception(_LE('Failed to detach volume %(volume_id)s.'),
                          {'volume_id': volume.id})
