Example #1
    def rpc_client(self):
        # Lazily build the volume RPC client. Client creation sits outside
        # the initialized() guard so that a caller who finds rpc already
        # initialized still gets a client instead of None.
        if self._client is None:
            if not rpc.initialized():
                rpc.init(CONF)
            target = messaging.Target(topic=CONF.volume_topic)
            self._client = rpc.get_client(target)
        return self._client
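A note on using the client this lazy accessor returns: rpc.get_client hands back an oslo.messaging RPCClient, which is normally narrowed with prepare() before a call is made. The method below is only a sketch of that pattern; the RPC method name, version, and arguments are illustrative, not taken from the example.

    def delete_volume(self, ctxt, volume):
        # prepare() pins the call to one volume host; cast() is
        # fire-and-forget (no return value is awaited).
        cctxt = self.rpc_client().prepare(server=volume.host, version='2.0')
        cctxt.cast(ctxt, 'delete_volume', volume_id=volume.id)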
Example #2
    def __init__(self, host, binary, topic, manager, report_interval=None,
                 periodic_interval=None, periodic_fuzzy_delay=None,
                 service_name=None, *args, **kwargs):
        super(Service, self).__init__()

        if not rpc.initialized():
            rpc.init(CONF)

        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        manager_class = importutils.import_class(self.manager_class_name)
        if CONF.profiler.enabled:
            manager_class = profiler.trace_cls("rpc")(manager_class)

        self.manager = manager_class(host=self.host,
                                     service_name=service_name,
                                     *args, **kwargs)
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.basic_config_check()
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []

        setup_profiler(binary, host)
        self.rpcserver = None
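A hedged sketch of how this constructor gets its arguments: the main() launchers later in this listing go through a Service.create() classmethod that derives most values from CONF, so the hard-coded values below are purely illustrative.

# Illustrative construction; Service.create() normally derives these
# values from CONF and naming conventions instead of hard-coding them.
server = Service(host=CONF.host,
                 binary='cinder-scheduler',
                 topic='cinder-scheduler',
                 manager='cinder.scheduler.manager.SchedulerManager',
                 report_interval=10)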
Example #3
    def test_init_no_notifications(self, driver, serializer_mock):
        """Test short-circuiting notifications with default and noop driver."""
        self.override_config('driver', driver,
                             group='oslo_messaging_notifications')
        rpc.init(test.CONF)
        self.assertEqual(rpc.utils.DO_NOTHING, rpc.NOTIFIER)
        serializer_mock.assert_not_called()
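The signature above implies a decorator stack that the listing omits: something supplies driver per test run, and something patches a serializer into serializer_mock. A plausible reconstruction, assuming ddt and mock (not verbatim from the project):

    # Assumed decorators: ddt.data supplies `driver`; mock.patch appends
    # `serializer_mock` after it (decorators stack bottom-up).
    @ddt.data(None, ['noop'])
    @mock.patch('oslo_messaging.JsonPayloadSerializer')
    def test_init_no_notifications(self, driver, serializer_mock):
        ...  # body as in the example above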
Example #4
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project="cinder", version=version.version_string())
    config.set_middleware_defaults()
    logging.setup(CONF, "cinder")
    LOG = logging.getLogger("cinder.all")
    versionutils.report_deprecated_feature(LOG, _("cinder-all is deprecated in Newton and will be removed in Ocata."))

    utils.monkey_patch()

    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)

    rpc.init(CONF)

    launcher = service.process_launcher()
    # cinder-api
    try:
        server = service.WSGIService("osapi_volume")
        launcher.launch_service(server, workers=server.workers or 1)
    except (Exception, SystemExit):
        LOG.exception(_LE("Failed to load osapi_volume"))

    for binary in ["cinder-scheduler", "cinder-backup"]:
        try:
            launcher.launch_service(service.Service.create(binary=binary))
        except (Exception, SystemExit):
            LOG.exception(_LE("Failed to load %s"), binary)

    # cinder-volume
    try:
        if CONF.enabled_backends:
            for backend in CONF.enabled_backends:
                CONF.register_opt(volume_cmd.host_opt, group=backend)
                backend_host = getattr(CONF, backend).backend_host
                host = "%s@%s" % (backend_host or CONF.host, backend)
                server = service.Service.create(
                    host=host, service_name=backend, binary="cinder-volume", coordination=True
                )
                # Dispose of the whole DB connection pool here before
                # starting another process.  Otherwise we run into cases
                # where child processes share DB connections which results
                # in errors.
                session.dispose_engine()
                launcher.launch_service(server)
        else:
            LOG.warning(
                _LW(
                    "Configuration for cinder-volume does not specify "
                    '"enabled_backends", using DEFAULT as backend. '
                    "Support for DEFAULT section to configure drivers "
                    "will be removed in the next release."
                )
            )
            server = service.Service.create(binary="cinder-volume", coordination=True)
            launcher.launch_service(server)
    except (Exception, SystemExit):
        LOG.exception(_LE("Failed to load cinder-volume"))

    launcher.wait()
Example #5
def initialize_application():
    objects.register_all()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")

    rpc.init(CONF)
    return wsgi.Loader(CONF).load_app(name='osapi_volume')
Example #6
def initialize_application():
    objects.register_all()
    CONF(sys.argv[1:], project="cinder", version=version.version_string())
    logging.setup(CONF, "cinder")
    config.set_middleware_defaults()

    rpc.init(CONF)
    return wsgi.Loader(CONF).load_app(name="osapi_volume")
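Both initialize_application() variants are WSGI application factories; the deployment glue is not part of the snippets, but a server such as uwsgi or mod_wsgi would consume them roughly like this (assumed shim):

# wsgi entry module (assumed): WSGI servers look up a module-level
# 'application' callable.
application = initialize_application()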
Example #7
    def _rpc_client(self):
        if self._client is None:
            if not rpc.initialized():
                rpc.init(CONF)
            target = messaging.Target(topic=constants.VOLUME_TOPIC)
            serializer = objects.base.CinderObjectSerializer()
            self._client = rpc.get_client(target, serializer=serializer)

        return self._client
Example #8
    def rpc_client(self):
        if self._client is None:
            if not rpc.initialized():
                rpc.init(CONF)
            target = messaging.Target(topic=CONF.volume_topic)
            serializer = objects.base.CinderObjectSerializer()
            self._client = rpc.get_client(target, serializer=serializer)

        return self._client
Example #9
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    config.set_middleware_defaults()
    logging.setup(CONF, "cinder")
    LOG = logging.getLogger('cinder.all')
    versionutils.report_deprecated_feature(LOG, _(
        'cinder-all is deprecated in Newton and will be removed in Ocata.'))

    utils.monkey_patch()

    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)

    rpc.init(CONF)

    launcher = service.process_launcher()
    # cinder-api
    try:
        server = service.WSGIService('osapi_volume')
        launcher.launch_service(server, workers=server.workers or 1)
    except (Exception, SystemExit):
        LOG.exception(_LE('Failed to load osapi_volume'))

    for binary in ['cinder-scheduler', 'cinder-backup']:
        try:
            launcher.launch_service(service.Service.create(binary=binary))
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s'), binary)

    # cinder-volume
    try:
        if CONF.enabled_backends:
            for backend in CONF.enabled_backends:
                CONF.register_opt(volume_cmd.host_opt, group=backend)
                backend_host = getattr(CONF, backend).backend_host
                host = "%s@%s" % (backend_host or CONF.host, backend)
                server = service.Service.create(host=host,
                                                service_name=backend,
                                                binary='cinder-volume')
                # Dispose of the whole DB connection pool here before
                # starting another process.  Otherwise we run into cases
                # where child processes share DB connections which results
                # in errors.
                session.dispose_engine()
                launcher.launch_service(server)
        else:
            server = service.Service.create(binary='cinder-volume')
            launcher.launch_service(server)
    except (Exception, SystemExit):
        LOG.exception(_LE('Failed to load cinder-volume'))

    launcher.wait()
Example #10
def main():
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup("cinder")
    utils.monkey_patch()

    rpc.init(CONF)
    launcher = service.process_launcher()
    server = service.WSGIService('osapi_volume')
    launcher.launch_service(server, workers=server.workers)
    launcher.wait()
Example #11
def initialize_application():
    objects.register_all()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    config.set_middleware_defaults()

    rpc.init(CONF)
    service.setup_profiler(constants.API_BINARY, CONF.host)

    return wsgi.Loader(CONF).load_app(name='osapi_volume')
Example #12
def main():
    objects.register_all()
    admin_context = context.get_admin_context()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    LOG = logging.getLogger("cinder")
    rpc.init(CONF)

    begin, end = utils.last_completed_audit_period()
    begin, end = _time_error(LOG, begin, end)

    LOG.info("Starting volume usage audit")
    LOG.info("Creating usages for %(begin_period)s until %(end_period)s",
             {"begin_period": begin, "end_period": end})

    extra_info = {
        'audit_period_beginning': str(begin),
        'audit_period_ending': str(end),
    }

    volumes = objects.VolumeList.get_all_active_by_window(admin_context,
                                                          begin,
                                                          end)

    LOG.info("Found %d volumes", len(volumes))
    for volume_ref in volumes:
        _obj_ref_action(_vol_notify_usage, LOG, volume_ref, extra_info,
                        admin_context, begin, end,
                        cinder.volume.utils.notify_about_volume_usage,
                        "volume_id", "volume")

    snapshots = objects.SnapshotList.get_all_active_by_window(admin_context,
                                                              begin, end)
    LOG.info("Found %d snapshots", len(snapshots))
    for snapshot_ref in snapshots:
        _obj_ref_action(_snap_notify_usage, LOG, snapshot_ref, extra_info,
                        admin_context, begin,
                        end, cinder.volume.utils.notify_about_snapshot_usage,
                        "snapshot_id", "snapshot")

    backups = objects.BackupList.get_all_active_by_window(admin_context,
                                                          begin, end)

    LOG.info("Found %d backups", len(backups))
    for backup_ref in backups:
        _obj_ref_action(_backup_notify_usage, LOG, backup_ref, extra_info,
                        admin_context, begin,
                        end, cinder.volume.utils.notify_about_backup_usage,
                        "backup_id", "backup")
    LOG.info("Volume usage audit completed")
Example #13
def main():
    objects.register_all()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    utils.monkey_patch()

    gmr.TextGuruMeditation.setup_autorun(version)

    rpc.init(CONF)
    launcher = service.process_launcher()
    server = service.WSGIService('osapi_volume')
    launcher.launch_service(server, workers=server.workers)
    launcher.wait()
Example #14
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder', version=version.version_string())
    config.set_middleware_defaults()
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    utils.monkey_patch()

    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)

    rpc.init(CONF)
    launcher = service.process_launcher()
    server = service.WSGIService('osapi_volume')
    launcher.launch_service(server, workers=server.workers)
    launcher.wait()
Example #15
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project="cinder", version=version.version_string())
    config.set_middleware_defaults()
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    utils.monkey_patch()

    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)

    rpc.init(CONF)
    launcher = service.process_launcher()
    server = service.WSGIService("osapi_volume")
    launcher.launch_service(server, workers=server.workers)
    launcher.wait()
Example #16
def _get_non_shared_target_hosts(ctxt):
    hosts = []
    numvols_needing_update = 0
    rpc.init(CONF)
    rpcapi = volume_rpcapi.VolumeAPI()

    services = objects.ServiceList.get_all_by_topic(ctxt, 'cinder-volume')
    for service in services:
        capabilities = rpcapi.get_capabilities(ctxt, service.host, True)
        if not capabilities.get('shared_targets', True):
            hosts.append(service.host)
            numvols_needing_update += db_api.model_query(
                ctxt,
                models.Volume).filter_by(shared_targets=True,
                                         service_uuid=service.uuid).count()
    return hosts, numvols_needing_update
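This helper pairs with cinder's shared_targets online data migration; the caller is not included in the listing, so the body below only sketches the pairing and is illustrative:

def shared_targets_online_data_migration(ctxt, max_count):
    # Illustrative: a real migration would rewrite up to max_count volume
    # rows for the hosts found above; here we only surface the counts.
    hosts, remaining = _get_non_shared_target_hosts(ctxt)
    updated = min(max_count, remaining) if hosts else 0
    return remaining, updated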
Example #17
def _get_non_shared_target_hosts(ctxt):
    hosts = []
    numvols_needing_update = 0
    rpc.init(CONF)
    rpcapi = volume_rpcapi.VolumeAPI()

    services = objects.ServiceList.get_all_by_topic(ctxt,
                                                    constants.VOLUME_TOPIC)
    for service in services:
        capabilities = rpcapi.get_capabilities(ctxt, service.host, True)
        if not capabilities.get('shared_targets', True):
            hosts.append(service.host)
            numvols_needing_update += db_api.model_query(
                ctxt, models.Volume).filter_by(
                    shared_targets=True,
                    service_uuid=service.uuid).count()
    return hosts, numvols_needing_update
Example #18
    def __init__(self, host, binary, topic, manager, report_interval=None,
                 periodic_interval=None, periodic_fuzzy_delay=None,
                 service_name=None, coordination=False, *args, **kwargs):
        super(Service, self).__init__()

        if not rpc.initialized():
            rpc.init(CONF)

        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        self.coordination = coordination
        manager_class = importutils.import_class(self.manager_class_name)
        if CONF.profiler.enabled:
            manager_class = profiler.trace_cls("rpc")(manager_class)

        # NOTE(geguileo): We need to create the Service DB entry before we
        # create the manager, otherwise capped versions for serializer and rpc
        # client would use existing DB entries not including us, which could
        # result in us using None (if it's the first time the service is run)
        # or an old version (if this is a normal upgrade of a single service).
        ctxt = context.get_admin_context()
        try:
            service_ref = objects.Service.get_by_args(ctxt, host, binary)
            service_ref.rpc_current_version = manager_class.RPC_API_VERSION
            obj_version = objects_base.OBJ_VERSIONS.get_current()
            service_ref.object_current_version = obj_version
            service_ref.save()
            self.service_id = service_ref.id
        except exception.NotFound:
            self._create_service_ref(ctxt, manager_class.RPC_API_VERSION)

        self.manager = manager_class(host=self.host,
                                     service_name=service_name,
                                     *args, **kwargs)
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.basic_config_check()
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []

        setup_profiler(binary, host)
        self.rpcserver = None
Example #19
def _get_non_shared_target_hosts(ctxt):
    hosts = []
    numvols_needing_update = 0
    rpc.init(CONF)
    rpcapi = volume_rpcapi.VolumeAPI()

    services = objects.ServiceList.get_all_by_topic(ctxt,
                                                    constants.VOLUME_TOPIC)
    for service in services:
        capabilities = rpcapi.get_capabilities(ctxt, service.host, True)
        # Select only non iSCSI connections and iSCSI that are explicit
        if (capabilities.get('storage_protocol') != 'iSCSI'
                or not capabilities.get('shared_targets', True)):
            hosts.append(service.host)
            numvols_needing_update += db_api.model_query(
                ctxt,
                models.Volume).filter_by(shared_targets=True,
                                         service_uuid=service.uuid).count()
    return hosts, numvols_needing_update
Example #20
def _get_non_shared_target_hosts(ctxt):
    hosts = []
    numvols_needing_update = 0
    rpc.init(CONF)
    rpcapi = volume_rpcapi.VolumeAPI()

    services = objects.ServiceList.get_all_by_topic(ctxt,
                                                    constants.VOLUME_TOPIC)
    for service in services:
        capabilities = rpcapi.get_capabilities(ctxt, service.host, True)
        # Select only non iSCSI connections and iSCSI that are explicit
        if (capabilities.get('storage_protocol') != 'iSCSI' or
                not capabilities.get('shared_targets', True)):
            hosts.append(service.host)
            numvols_needing_update += db_api.model_query(
                ctxt, models.Volume).filter_by(
                    shared_targets=True,
                    service_uuid=service.uuid).count()
    return hosts, numvols_needing_update
Example #21
    def delete(self, volume_id):
        """Delete a volume, bypassing the check that it must be available."""
        ctxt = context.get_admin_context()
        volume = objects.Volume.get_by_id(ctxt, volume_id)
        host = vutils.extract_host(volume.host) if volume.host else None

        if not host:
            print(_("Volume not yet assigned to host."))
            print(_("Deleting volume from database and skipping rpc."))
            volume.destroy()
            return

        if volume.status == 'in-use':
            print(_("Volume is in-use."))
            print(_("Detach volume from instance and then try again."))
            return

        rpc.init(CONF)
        rpcapi = volume_rpcapi.VolumeAPI()
        rpcapi.delete_volume(ctxt, volume)
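Because it bypasses the usual 'available' status check, this delete lives behind an admin CLI rather than the public API; the wiring is not shown, but usage is roughly the following (the enclosing class name is an assumption based on cinder-manage conventions):

# Hypothetical direct invocation; cinder-manage normally dispatches here
# from 'cinder-manage volume delete <volume-id>'.
VolumeCommands().delete(volume_id)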
Example #22
def init_app():
    """initialize application"""

    cfg.CONF(sys.argv[1:], project='cinder', version=version.version_string())

    # choose the first in backend list
    backend = cfg.CONF.enabled_backends[0]
    cfg.CONF.register_opt(host_opt, backend)

    # get backend host
    backend_host = getattr(cfg.CONF, backend).backend_host

    if not backend_host:
        backend_host = socket.gethostname()

    # form a host
    host = "%s@%s" % (backend_host, backend)

    rpc.init(cfg.CONF)

    return (backend, host, None)
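The host@backend string built above is how cinder addresses a single configured backend of a volume service. An illustrative consumer of the returned tuple:

# Illustrative; init_app() returns (backend, host, None).
backend, host, _ = init_app()
print("standalone volume service for %s will register as %s" % (backend, host))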
Example #23
    def __init__(self,
                 host,
                 binary,
                 topic,
                 manager,
                 report_interval=None,
                 periodic_interval=None,
                 periodic_fuzzy_delay=None,
                 service_name=None,
                 *args,
                 **kwargs):
        super(Service, self).__init__()

        if not rpc.initialized():
            rpc.init(CONF)

        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        manager_class = importutils.import_class(self.manager_class_name)
        # modify by yanghc mantis:0000300 begin
        # manager_class = profiler.trace_cls("rpc")(manager_class)
        if CONF.profiler.profiler_enabled:
            manager_class = profiler.trace_cls("rpc")(manager_class)
        # modify by yanghc mantis:0000300 end

        self.manager = manager_class(host=self.host,
                                     service_name=service_name,
                                     *args,
                                     **kwargs)
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.basic_config_check()
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []

        setup_profiler(binary, host)
Example #24
File: test.py Project: njhsi/cinder
    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()

        # Create default notifier
        self.notifier = fake_notifier.get_fake_notifier()

        # Mock rpc get notifier with fake notifier method that joins all
        # notifications with the default notifier
        p = mock.patch("cinder.rpc.get_notifier", side_effect=self._get_joined_notifier)
        p.start()

        # Unit tests do not need to use lazy gettext
        i18n.enable_lazy(False)

        test_timeout = os.environ.get("OS_TEST_TIMEOUT", 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        environ_enabled = lambda var_name: strutils.bool_from_string(os.environ.get(var_name))
        if environ_enabled("OS_STDOUT_CAPTURE"):
            stdout = self.useFixture(fixtures.StringStream("stdout")).stream
            self.useFixture(fixtures.MonkeyPatch("sys.stdout", stdout))
        if environ_enabled("OS_STDERR_CAPTURE"):
            stderr = self.useFixture(fixtures.StringStream("stderr")).stream
            self.useFixture(fixtures.MonkeyPatch("sys.stderr", stderr))
        if environ_enabled("OS_LOG_CAPTURE"):
            log_format = "%(levelname)s [%(name)s] %(message)s"
            if environ_enabled("OS_DEBUG"):
                level = logging.DEBUG
            else:
                level = logging.INFO
            self.useFixture(fixtures.LoggerFixture(nuke_handlers=False, format=log_format, level=level))

        rpc.add_extra_exmods("cinder.tests.unit")
        self.addCleanup(rpc.clear_extra_exmods)
        self.addCleanup(rpc.cleanup)

        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = "fake"
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)
        rpc.init(CONF)

        conf_fixture.set_defaults(CONF)
        CONF([], default_config_files=[])

        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()

        CONF.set_default("connection", "sqlite://", "database")
        CONF.set_default("sqlite_synchronous", False, "database")

        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(
                sqla_api,
                migration,
                sql_connection=CONF.database.connection,
                sqlite_db=CONF.database.sqlite_db,
                sqlite_clean_db=CONF.sqlite_clean_db,
            )
        self.useFixture(_DB_CACHE)

        # NOTE(danms): Make sure to reset us back to non-remote objects
        # for each test to avoid interactions. Also, backup the object
        # registry.
        objects_base.CinderObject.indirection_api = None
        self._base_test_obj_backup = copy.copy(objects_base.CinderObjectRegistry._registry._obj_classes)
        self.addCleanup(self._restore_obj_registry)

        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        mox_fixture = self.useFixture(moxstubout.MoxStubout())
        self.mox = mox_fixture.mox
        self.stubs = mox_fixture.stubs
        self.addCleanup(CONF.reset)
        self.addCleanup(self._common_cleanup)
        self.injected = []
        self._services = []

        fake_notifier.stub_notifier(self.stubs)

        self.override_config("fatal_exception_format_errors", True)
        # This will be cleaned up by the NestedTempfile fixture
        lock_path = self.useFixture(fixtures.TempDir()).path
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(lock_path=lock_path, group="oslo_concurrency")
        lockutils.set_defaults(lock_path)
        self.override_config(
            "policy_file",
            os.path.join(
                os.path.abspath(os.path.join(os.path.dirname(__file__), "..")), "cinder/tests/unit/policy.json"
            ),
            group="oslo_policy",
        )

        self._disable_osprofiler()
Example #25
    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()

        # Create default notifier
        self.notifier = fake_notifier.get_fake_notifier()

        # Mock rpc get notifier with fake notifier method that joins all
        # notifications with the default notifier
        p = mock.patch('cinder.rpc.get_notifier',
                       side_effect=self._get_joined_notifier)
        p.start()

        # Unit tests do not need to use lazy gettext
        i18n.enable_lazy(False)

        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        environ_enabled = (lambda var_name:
                           strutils.bool_from_string(os.environ.get(var_name)))
        if environ_enabled('OS_STDOUT_CAPTURE'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if environ_enabled('OS_STDERR_CAPTURE'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

        self.useFixture(log_fixture.get_logging_handle_error_fixture())
        self.useFixture(cinder_fixtures.StandardLogging())

        rpc.add_extra_exmods("cinder.tests.unit")
        self.addCleanup(rpc.clear_extra_exmods)
        self.addCleanup(rpc.cleanup)

        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = 'fake'
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)
        rpc.init(CONF)

        conf_fixture.set_defaults(CONF)
        CONF([], default_config_files=[])

        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()

        CONF.set_default('connection', 'sqlite://', 'database')
        CONF.set_default('sqlite_synchronous', False, 'database')

        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(sqla_api, migration,
                                 sql_connection=CONF.database.connection,
                                 sqlite_db=CONF.database.sqlite_db,
                                 sqlite_clean_db=CONF.sqlite_clean_db)
        self.useFixture(_DB_CACHE)

        # NOTE(danms): Make sure to reset us back to non-remote objects
        # for each test to avoid interactions. Also, backup the object
        # registry.
        objects_base.CinderObject.indirection_api = None
        self._base_test_obj_backup = copy.copy(
            objects_base.CinderObjectRegistry._registry._obj_classes)
        self.addCleanup(self._restore_obj_registry)

        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        mox_fixture = self.useFixture(moxstubout.MoxStubout())
        self.mox = mox_fixture.mox
        self.stubs = mox_fixture.stubs
        self.addCleanup(CONF.reset)
        self.addCleanup(self._common_cleanup)
        self.injected = []
        self._services = []

        fake_notifier.stub_notifier(self.stubs)

        self.override_config('fatal_exception_format_errors', True)
        # This will be cleaned up by the NestedTempfile fixture
        lock_path = self.useFixture(fixtures.TempDir()).path
        self.fixture = self.useFixture(
            config_fixture.Config(lockutils.CONF))
        self.fixture.config(lock_path=lock_path,
                            group='oslo_concurrency')
        lockutils.set_defaults(lock_path)
        self.override_config('policy_file',
                             os.path.join(
                                 os.path.abspath(
                                     os.path.join(
                                         os.path.dirname(__file__),
                                         '..',
                                     )
                                 ),
                                 'cinder/tests/unit/policy.json'),
                             group='oslo_policy')

        self._disable_osprofiler()

        # NOTE(geguileo): This is required because common get_by_id method in
        # cinder.db.sqlalchemy.api caches get methods and if we use a mocked
        # get method in one test it would carry on to the next test.  So we
        # clear out the cache.
        sqla_api._GET_METHODS = {}
Example #26
    def __init__(self,
                 host,
                 binary,
                 topic,
                 manager,
                 report_interval=None,
                 periodic_interval=None,
                 periodic_fuzzy_delay=None,
                 service_name=None,
                 coordination=False,
                 cluster=None,
                 *args,
                 **kwargs):
        super(Service, self).__init__()

        if not rpc.initialized():
            rpc.init(CONF)

        self.cluster = cluster
        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        self.coordination = coordination
        manager_class = importutils.import_class(self.manager_class_name)
        if CONF.profiler.enabled:
            manager_class = profiler.trace_cls("rpc")(manager_class)

        self.service = None
        self.manager = manager_class(host=self.host,
                                     cluster=self.cluster,
                                     service_name=service_name,
                                     *args,
                                     **kwargs)
        self.availability_zone = self.manager.availability_zone

        # NOTE(geguileo): We need to create the Service DB entry before we
        # create the manager, otherwise capped versions for serializer and rpc
        # client would use existing DB entries not including us, which could
        # result in us using None (if it's the first time the service is run)
        # or an old version (if this is a normal upgrade of a single service).
        ctxt = context.get_admin_context()
        try:
            service_ref = objects.Service.get_by_args(ctxt, host, binary)
            service_ref.rpc_current_version = manager_class.RPC_API_VERSION
            obj_version = objects_base.OBJ_VERSIONS.get_current()
            service_ref.object_current_version = obj_version

            # added_to_cluster attribute marks when we consider that we have
            # just added a host to a cluster so we can include resources into
            # that cluster.  We consider that we have added the host when we
            # didn't have data in the cluster DB field and our current
            # configuration has a cluster value.  We don't want to do anything
            # automatic if the cluster is changed, in those cases we'll want
            # to use the cinder manage command and do it manually.
            self.added_to_cluster = (not service_ref.cluster_name and cluster)

            if service_ref.cluster_name != cluster:
                LOG.info(
                    'This service has been moved from cluster '
                    '%(cluster_svc)s to %(cluster_cfg)s. Resources '
                    'will %(opt_no)sbe moved to the new cluster', {
                        'cluster_svc': service_ref.cluster_name,
                        'cluster_cfg': cluster,
                        'opt_no': '' if self.added_to_cluster else 'NO '
                    })

            if self.added_to_cluster:
                # We copy the service's disabled status to the cluster if we
                # have to create it.
                self._ensure_cluster_exists(ctxt, service_ref)
                service_ref.cluster_name = cluster
            service_ref.save()
            Service.service_id = service_ref.id
            self.origin_service_id = service_ref.id
        except exception.NotFound:
            self._create_service_ref(ctxt, manager_class.RPC_API_VERSION)
            # Service entry didn't exist because it was manually removed
            # or it's the first time running, to be on the safe side we say we
            # were added if we are clustered.
            self.added_to_cluster = bool(cluster)

        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.basic_config_check()
        self.saved_args, self.saved_kwargs = args, kwargs

        setup_profiler(binary, host)
        self.rpcserver = None
        self.backend_rpcserver = None
        self.cluster_rpcserver = None
Example #27
    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()

        # Import cinder objects for test cases
        objects.register_all()

        # Unit tests do not need to use lazy gettext
        i18n.enable_lazy(False)

        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        environ_enabled = (lambda var_name:
                           strutils.bool_from_string(os.environ.get(var_name)))
        if environ_enabled('OS_STDOUT_CAPTURE'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if environ_enabled('OS_STDERR_CAPTURE'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
        if environ_enabled('OS_LOG_CAPTURE'):
            log_format = '%(levelname)s [%(name)s] %(message)s'
            if environ_enabled('OS_DEBUG'):
                level = logging.DEBUG
            else:
                level = logging.INFO
            self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
                                                   format=log_format,
                                                   level=level))

        rpc.add_extra_exmods("cinder.tests.unit")
        self.addCleanup(rpc.clear_extra_exmods)
        self.addCleanup(rpc.cleanup)

        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = 'fake'
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)
        rpc.init(CONF)

        conf_fixture.set_defaults(CONF)
        CONF([], default_config_files=[])

        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()

        CONF.set_default('connection', 'sqlite://', 'database')
        CONF.set_default('sqlite_synchronous', False, 'database')

        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(sqla_api, migration,
                                 sql_connection=CONF.database.connection,
                                 sqlite_db=CONF.database.sqlite_db,
                                 sqlite_clean_db=CONF.sqlite_clean_db)
        self.useFixture(_DB_CACHE)

        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        self.mox = mox.Mox()
        self.stubs = stubout.StubOutForTesting()
        self.addCleanup(CONF.reset)
        self.addCleanup(self.mox.UnsetStubs)
        self.addCleanup(self.stubs.UnsetAll)
        self.addCleanup(self.stubs.SmartUnsetAll)
        self.addCleanup(self.mox.VerifyAll)
        self.addCleanup(self._common_cleanup)
        self.injected = []
        self._services = []

        fake_notifier.stub_notifier(self.stubs)

        self.override_config('fatal_exception_format_errors', True)
        # This will be cleaned up by the NestedTempfile fixture
        lock_path = self.useFixture(fixtures.TempDir()).path
        self.fixture = self.useFixture(
            config_fixture.Config(lockutils.CONF))
        self.fixture.config(lock_path=lock_path,
                            group='oslo_concurrency')
        lockutils.set_defaults(lock_path)
        self.override_config('policy_file',
                             os.path.join(
                                 os.path.abspath(
                                     os.path.join(
                                         os.path.dirname(__file__),
                                         '..',
                                     )
                                 ),
                                 'cinder/tests/unit/policy.json'))
Example #28
    def __init__(self,
                 host,
                 binary,
                 topic,
                 manager,
                 report_interval=None,
                 periodic_interval=None,
                 periodic_fuzzy_delay=None,
                 service_name=None,
                 coordination=False,
                 cluster=None,
                 *args,
                 **kwargs):
        super(Service, self).__init__()

        if not rpc.initialized():
            rpc.init(CONF)

        self.cluster = cluster
        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        self.coordination = coordination
        manager_class = importutils.import_class(self.manager_class_name)
        if CONF.profiler.enabled:
            manager_class = profiler.trace_cls("rpc")(manager_class)

        # NOTE(geguileo): We need to create the Service DB entry before we
        # create the manager, otherwise capped versions for serializer and rpc
        # client would use existing DB entries not including us, which could
        # result in us using None (if it's the first time the service is run)
        # or an old version (if this is a normal upgrade of a single service).
        ctxt = context.get_admin_context()
        self.is_upgrading_to_n = self.is_svc_upgrading_to_n(binary)
        try:
            service_ref = objects.Service.get_by_args(ctxt, host, binary)
            service_ref.rpc_current_version = manager_class.RPC_API_VERSION
            obj_version = objects_base.OBJ_VERSIONS.get_current()
            service_ref.object_current_version = obj_version
            # TODO(geguileo): In O we can remove the service upgrading part on
            # the next equation, because by then all our services will be
            # properly setting the cluster during volume migrations since
            # they'll have the new Volume ORM model.  But until then we can
            # only set the cluster in the DB and pass added_to_cluster to
            # init_host when we have completed the rolling upgrade from M to N.

            # added_to_cluster attribute marks when we consider that we have
            # just added a host to a cluster so we can include resources into
            # that cluster.  We consider that we have added the host when we
            # didn't have data in the cluster DB field and our current
            # configuration has a cluster value.  We don't want to do anything
            # automatic if the cluster is changed, in those cases we'll want
            # to use the cinder manage command and do it manually.
            self.added_to_cluster = (not service_ref.cluster_name and cluster
                                     and not self.is_upgrading_to_n)

            # TODO(geguileo): In O - Remove self.is_upgrading_to_n part
            if (service_ref.cluster_name != cluster
                    and not self.is_upgrading_to_n):
                LOG.info(
                    _LI('This service has been moved from cluster '
                        '%(cluster_svc)s to %(cluster_cfg)s. Resources '
                        'will %(opt_no)sbe moved to the new cluster'), {
                            'cluster_svc': service_ref.cluster_name,
                            'cluster_cfg': cluster,
                            'opt_no': '' if self.added_to_cluster else 'NO '
                        })

            if self.added_to_cluster:
                # We copy the service's disabled status to the cluster if we
                # have to create it.
                self._ensure_cluster_exists(ctxt, service_ref.disabled)
                service_ref.cluster_name = cluster
            service_ref.save()
            Service.service_id = service_ref.id
        except exception.NotFound:
            # We don't want to include cluster information on the service or
            # create the cluster entry if we are upgrading.
            self._create_service_ref(ctxt, manager_class.RPC_API_VERSION)
            # TODO(geguileo): In O set added_to_cluster to True
            # We don't want to include resources in the cluster during the
            # start while we are still doing the rolling upgrade.
            self.added_to_cluster = not self.is_upgrading_to_n

        self.manager = manager_class(host=self.host,
                                     cluster=self.cluster,
                                     service_name=service_name,
                                     *args,
                                     **kwargs)
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.basic_config_check()
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []

        setup_profiler(binary, host)
        self.rpcserver = None
        self.cluster_rpcserver = None
Example #29
        return list_response

    def GetCapacity(self, req, context):
        pass

    def ControllerGetCapabilities(self, req, context):
        pass


def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    csi_pb2_grpc.add_ControllerServicer_to_server(CinderServicer(), server)
    server.add_insecure_port('[::]:50051')
    server.start()
    print("now serving...")
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)


if __name__ == '__main__':
    objects.register_all()
    CONF(sys.argv[1:], project='cinder', version=version.version_string())
    config.set_middleware_defaults()
    #utils.monkey_patch()
    rpc.init(CONF)

    serve()
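A matching client sketch for the server above. ControllerStub follows standard grpcio codegen naming for a Controller service, and the request message name assumes the CSI proto that csi_pb2 was generated from; treat both names as assumptions:

import grpc

channel = grpc.insecure_channel('localhost:50051')
stub = csi_pb2_grpc.ControllerStub(channel)  # assumed codegen name
caps = stub.ControllerGetCapabilities(
    csi_pb2.ControllerGetCapabilitiesRequest())  # assumed message name
print(caps)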
Example #30
def main():
    admin_context = context.get_admin_context()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup("cinder")
    LOG = logging.getLogger("cinder")
    rpc.init(CONF)
    begin, end = utils.last_completed_audit_period()
    if CONF.start_time:
        begin = datetime.strptime(CONF.start_time, "%Y-%m-%d %H:%M:%S")
    if CONF.end_time:
        end = datetime.strptime(CONF.end_time, "%Y-%m-%d %H:%M:%S")
    if not end > begin:
        msg = _("The end time (%(end)s) must be after the start "
                "time (%(start)s).") % {'start': begin,
                                        'end': end}
        print(msg)
        LOG.error(msg)
        sys.exit(-1)
    print(_("Starting volume usage audit"))
    msg = _("Creating usages for %(begin_period)s until %(end_period)s")
    print(msg % {"begin_period": str(begin), "end_period": str(end)})

    extra_info = {
        'audit_period_beginning': str(begin),
        'audit_period_ending': str(end),
    }

    volumes = db.volume_get_active_by_window(admin_context,
                                             begin,
                                             end)
    print(_("Found %d volumes") % len(volumes))
    for volume_ref in volumes:
        try:
            LOG.debug("Send exists notification for <volume_id: "
                      "%(volume_id)s> <project_id %(project_id)s> "
                      "<%(extra_info)s>" %
                      {'volume_id': volume_ref.id,
                       'project_id': volume_ref.project_id,
                       'extra_info': extra_info})
            cinder.volume.utils.notify_about_volume_usage(
                admin_context,
                volume_ref,
                'exists', extra_usage_info=extra_info)
        except Exception as e:
            LOG.error(_LE("Failed to send exists notification"
                          " for volume %s.") %
                      volume_ref.id)
            print(traceback.format_exc(e))

        if (CONF.send_actions and
                volume_ref.created_at > begin and
                volume_ref.created_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(volume_ref.created_at),
                    'audit_period_ending': str(volume_ref.created_at),
                }
                LOG.debug("Send create notification for "
                          "<volume_id: %(volume_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>" %
                          {'volume_id': volume_ref.id,
                           'project_id': volume_ref.project_id,
                           'extra_info': local_extra_info})
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'create.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'create.end', extra_usage_info=local_extra_info)
            except Exception as e:
                LOG.error(_LE("Failed to send create notification for "
                              "volume %s.") % volume_ref.id)
                print(traceback.format_exc(e))

        if (CONF.send_actions and volume_ref.deleted_at and
                volume_ref.deleted_at > begin and
                volume_ref.deleted_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(volume_ref.deleted_at),
                    'audit_period_ending': str(volume_ref.deleted_at),
                }
                LOG.debug("Send delete notification for "
                          "<volume_id: %(volume_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>" %
                          {'volume_id': volume_ref.id,
                           'project_id': volume_ref.project_id,
                           'extra_info': local_extra_info})
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'delete.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'delete.end', extra_usage_info=local_extra_info)
            except Exception as e:
                LOG.error(_LE("Failed to send delete notification for volume "
                              "%s.") % volume_ref.id)
                print(traceback.format_exc(e))

    snapshots = db.snapshot_get_active_by_window(admin_context,
                                                 begin,
                                                 end)
    print(_("Found %d snapshots") % len(snapshots))
    for snapshot_ref in snapshots:
        try:
            LOG.debug("Send notification for <snapshot_id: %(snapshot_id)s> "
                      "<project_id %(project_id)s> <%(extra_info)s>" %
                      {'snapshot_id': snapshot_ref.id,
                       'project_id': snapshot_ref.project_id,
                       'extra_info': extra_info})
            cinder.volume.utils.notify_about_snapshot_usage(admin_context,
                                                            snapshot_ref,
                                                            'exists',
                                                            extra_info)
        except Exception as e:
            LOG.error(_LE("Failed to send exists notification "
                          "for snapshot %s.")
                      % snapshot_ref.id)
            print(traceback.format_exc(e))

        if (CONF.send_actions and
                snapshot_ref.created_at > begin and
                snapshot_ref.created_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(snapshot_ref.created_at),
                    'audit_period_ending': str(snapshot_ref.created_at),
                }
                LOG.debug("Send create notification for "
                          "<snapshot_id: %(snapshot_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>" %
                          {'snapshot_id': snapshot_ref.id,
                           'project_id': snapshot_ref.project_id,
                           'extra_info': local_extra_info})
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'create.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'create.end', extra_usage_info=local_extra_info)
            except Exception as e:
                LOG.error(_LE("Failed to send create notification for snapshot"
                              "%s.") % snapshot_ref.id)
                print(traceback.format_exc(e))

        if (CONF.send_actions and snapshot_ref.deleted_at and
                snapshot_ref.deleted_at > begin and
                snapshot_ref.deleted_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(snapshot_ref.deleted_at),
                    'audit_period_ending': str(snapshot_ref.deleted_at),
                }
                LOG.debug("Send delete notification for "
                          "<snapshot_id: %(snapshot_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>" %
                          {'snapshot_id': snapshot_ref.id,
                           'project_id': snapshot_ref.project_id,
                           'extra_info': local_extra_info})
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'delete.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'delete.end', extra_usage_info=local_extra_info)
            except Exception as e:
                LOG.error(_LE("Failed to send delete notification for snapshot"
                              "%s.") % snapshot_ref.id)
                print(traceback.format_exc(e))

    print(_("Volume usage audit completed"))
Example #31
    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()

        # Create default notifier
        self.notifier = fake_notifier.get_fake_notifier()

        # Mock rpc get notifier with fake notifier method that joins all
        # notifications with the default notifier
        self.patch('cinder.rpc.get_notifier',
                   side_effect=self._get_joined_notifier)

        if self.MOCK_WORKER:
            # Mock worker creation for all tests that don't care about it
            clean_path = 'cinder.objects.cleanable.CinderCleanableObject.%s'
            for method in ('create_worker', 'set_worker', 'unset_worker'):
                self.patch(clean_path % method, return_value=None)

        # Unit tests do not need to use lazy gettext
        i18n.enable_lazy(False)

        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        environ_enabled = (lambda var_name: strutils.bool_from_string(
            os.environ.get(var_name)))
        if environ_enabled('OS_STDOUT_CAPTURE'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if environ_enabled('OS_STDERR_CAPTURE'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

        self.useFixture(log_fixture.get_logging_handle_error_fixture())
        self.useFixture(cinder_fixtures.StandardLogging())

        rpc.add_extra_exmods("cinder.tests.unit")
        self.addCleanup(rpc.clear_extra_exmods)
        self.addCleanup(rpc.cleanup)

        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = 'fake'
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)

        # Load oslo_messaging_notifications config group so we can set an
        # override to prevent notifications from being ignored due to the
        # short-circuit mechanism.
        oslo_messaging.get_notification_transport(CONF)
        #  We need to use a valid driver for the notifications, so we use test.
        self.override_config('driver', ['test'],
                             group='oslo_messaging_notifications')
        rpc.init(CONF)

        # NOTE(geguileo): This is required because _determine_obj_version_cap
        # and _determine_rpc_version_cap functions in cinder.rpc.RPCAPI cache
        # versions in LAST_RPC_VERSIONS and LAST_OBJ_VERSIONS so we may have
        # weird interactions between tests if we don't clear them before each
        # test.
        rpc.LAST_OBJ_VERSIONS = {}
        rpc.LAST_RPC_VERSIONS = {}

        conf_fixture.set_defaults(CONF)
        CONF([], default_config_files=[])

        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()

        CONF.set_default('connection', 'sqlite://', 'database')
        CONF.set_default('sqlite_synchronous', False, 'database')

        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(sqla_api,
                                 migration,
                                 sql_connection=CONF.database.connection)
        self.useFixture(_DB_CACHE)

        # NOTE(danms): Make sure to reset us back to non-remote objects
        # for each test to avoid interactions. Also, backup the object
        # registry.
        objects_base.CinderObject.indirection_api = None
        self._base_test_obj_backup = copy.copy(
            objects_base.CinderObjectRegistry._registry._obj_classes)
        self.addCleanup(self._restore_obj_registry)

        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        mox_fixture = self.useFixture(moxstubout.MoxStubout())
        self.mox = mox_fixture.mox
        self.stubs = mox_fixture.stubs
        self.addCleanup(CONF.reset)
        self.addCleanup(self._common_cleanup)
        self.injected = []
        self._services = []

        fake_notifier.mock_notifier(self)

        self.override_config('fatal_exception_format_errors', True)
        # This will be cleaned up by the NestedTempfile fixture
        lock_path = self.useFixture(fixtures.TempDir()).path
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(lock_path=lock_path, group='oslo_concurrency')
        lockutils.set_defaults(lock_path)
        self.override_config('policy_file',
                             os.path.join(
                                 os.path.abspath(
                                     os.path.join(
                                         os.path.dirname(__file__),
                                         '..',
                                     )), self.POLICY_PATH),
                             group='oslo_policy')

        self._disable_osprofiler()
        self._disallow_invalid_uuids()

        # NOTE(geguileo): This is required because common get_by_id method in
        # cinder.db.sqlalchemy.api caches get methods and if we use a mocked
        # get method in one test it would carry on to the next test.  So we
        # clear out the cache.
        sqla_api._GET_METHODS = {}

        self.override_config('backend_url',
                             'file://' + lock_path,
                             group='coordination')
        coordination.COORDINATOR.start()
        self.addCleanup(coordination.COORDINATOR.stop)
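
The driver override above exists because cinder.rpc.init() short-circuits notifications when no driver is configured, swapping the notifier for a no-op. A minimal sketch of that mechanism (the cinder.rpc source is not part of this listing, so the exact shape below is an assumption):

import oslo_messaging as messaging


class _DoNothing(object):
    """Stand-in for the no-op notifier sentinel (cinder.utils.DO_NOTHING)."""
    def __getattr__(self, name):
        return lambda *args, **kwargs: None


DO_NOTHING = _DoNothing()
NOTIFIER = None


def init(conf):
    """Hedged sketch; not the verbatim cinder.rpc source."""
    global NOTIFIER
    # get_notification_transport() also registers the
    # [oslo_messaging_notifications] options on conf.
    transport = messaging.get_notification_transport(conf)
    if conf.oslo_messaging_notifications.driver:
        NOTIFIER = messaging.Notifier(transport)
    else:
        # No driver configured: every notification becomes a no-op, which
        # is why the setUp above overrides 'driver' to ['test'].
        NOTIFIER = DO_NOTHING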
Example #33
    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()

        # Create default notifier
        self.notifier = fake_notifier.get_fake_notifier()

        # Mock rpc get notifier with fake notifier method that joins all
        # notifications with the default notifier
        self.patch('cinder.rpc.get_notifier',
                   side_effect=self._get_joined_notifier)

        if self.MOCK_WORKER:
            # Mock worker creation for all tests that don't care about it
            clean_path = 'cinder.objects.cleanable.CinderCleanableObject.%s'
            for method in ('create_worker', 'set_worker', 'unset_worker'):
                self.patch(clean_path % method, return_value=None)

        if self.MOCK_TOOZ:
            self.patch('cinder.coordination.Coordinator.start')
            self.patch('cinder.coordination.Coordinator.stop')
            self.patch('cinder.coordination.Coordinator.get_lock')

        # Unit tests do not need to use lazy gettext
        i18n.enable_lazy(False)

        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        environ_enabled = (lambda var_name:
                           strutils.bool_from_string(os.environ.get(var_name)))
        if environ_enabled('OS_STDOUT_CAPTURE'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if environ_enabled('OS_STDERR_CAPTURE'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

        self.useFixture(log_fixture.get_logging_handle_error_fixture())
        self.useFixture(cinder_fixtures.StandardLogging())

        rpc.add_extra_exmods("cinder.tests.unit")
        self.addCleanup(rpc.clear_extra_exmods)
        self.addCleanup(rpc.cleanup)

        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = 'fake'
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)

        # Load oslo_messaging_notifications config group so we can set an
        # override to prevent notifications from being ignored due to the
        # short-circuit mechanism.
        oslo_messaging.get_notification_transport(CONF)
        # We need to use a valid driver for the notifications, so we use test.
        self.override_config('driver', ['test'],
                             group='oslo_messaging_notifications')
        rpc.init(CONF)

        # NOTE(geguileo): This is required because _determine_obj_version_cap
        # and _determine_rpc_version_cap functions in cinder.rpc.RPCAPI cache
        # versions in LAST_RPC_VERSIONS and LAST_OBJ_VERSIONS so we may have
        # weird interactions between tests if we don't clear them before each
        # test.
        rpc.LAST_OBJ_VERSIONS = {}
        rpc.LAST_RPC_VERSIONS = {}

        conf_fixture.set_defaults(CONF)
        CONF([], default_config_files=[])

        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()

        CONF.set_default('connection', 'sqlite://', 'database')
        CONF.set_default('sqlite_synchronous', False, 'database')

        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(sqla_api, migration,
                                 sql_connection=CONF.database.connection)
        self.useFixture(_DB_CACHE)

        # NOTE(blk-u): WarningsFixture must be after the Database fixture
        # because sqlalchemy-migrate messes with the warnings filters.
        self.useFixture(cinder_fixtures.WarningsFixture())

        # NOTE(danms): Make sure to reset us back to non-remote objects
        # for each test to avoid interactions. Also, backup the object
        # registry.
        objects_base.CinderObject.indirection_api = None
        self._base_test_obj_backup = copy.copy(
            objects_base.CinderObjectRegistry._registry._obj_classes)
        self.addCleanup(self._restore_obj_registry)

        self.addCleanup(CONF.reset)
        self.addCleanup(self._common_cleanup)
        self.injected = []
        self._services = []

        fake_notifier.mock_notifier(self)

        # This will be cleaned up by the NestedTempfile fixture
        lock_path = self.useFixture(fixtures.TempDir()).path
        self.fixture = self.useFixture(
            config_fixture.Config(lockutils.CONF))
        self.fixture.config(lock_path=lock_path,
                            group='oslo_concurrency')
        lockutils.set_defaults(lock_path)
        self.override_config('policy_file',
                             os.path.join(
                                 os.path.abspath(
                                     os.path.join(
                                         os.path.dirname(__file__),
                                         '..',
                                     )
                                 ),
                                 self.POLICY_PATH),
                             group='oslo_policy')
        self.override_config('resource_query_filters_file',
                             os.path.join(
                                 os.path.abspath(
                                     os.path.join(
                                         os.path.dirname(__file__),
                                         '..',
                                     )
                                 ),
                                 self.RESOURCE_FILTER_PATH))
        self._disable_osprofiler()

        # NOTE(geguileo): This is required because common get_by_id method in
        # cinder.db.sqlalchemy.api caches get methods and if we use a mocked
        # get method in one test it would carry on to the next test.  So we
        # clear out the cache.
        sqla_api._GET_METHODS = {}

        self.override_config('backend_url', 'file://' + lock_path,
                             group='coordination')
        coordination.COORDINATOR.start()
        self.addCleanup(coordination.COORDINATOR.stop)

        if six.PY3:
            # TODO(smcginnis) Python 3 deprecates assertRaisesRegexp to
            # assertRaisesRegex, but Python 2 does not have the new name. This
            # can be removed once we stop supporting py2 or the new name is
            # added.
            self.assertRaisesRegexp = self.assertRaisesRegex

        # Ensure we have the default tpool size value and we don't carry
        # threads from other test runs.
        tpool.killall()
        tpool._nthreads = 20
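
MOCK_WORKER and MOCK_TOOZ above are class-level opt-outs. A hypothetical subclass (the name is invented for illustration) that needs the real tooz coordinator would flip the flag:

class RealCoordinationTest(TestCase):
    # Hypothetical: leave Coordinator.start/stop/get_lock unpatched so the
    # test exercises the real tooz backend configured via 'backend_url'.
    MOCK_TOOZ = False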
Example #34
    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()

        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
                os.environ.get('OS_STDOUT_CAPTURE') == '1'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
                os.environ.get('OS_STDERR_CAPTURE') == '1'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

        self.log_fixture = self.useFixture(fixtures.FakeLogger(
            level=logging.DEBUG))

        rpc.add_extra_exmods("cinder.tests")
        self.addCleanup(rpc.clear_extra_exmods)
        self.addCleanup(rpc.cleanup)

        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = 'fake'
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)
        rpc.init(CONF)

        conf_fixture.set_defaults(CONF)
        CONF([], default_config_files=[])

        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()

        CONF.set_default('connection', 'sqlite://', 'database')
        CONF.set_default('sqlite_synchronous', False)

        self.log_fixture = self.useFixture(fixtures.FakeLogger())

        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(session, migration,
                                 sql_connection=CONF.database.connection,
                                 sqlite_db=CONF.sqlite_db,
                                 sqlite_clean_db=CONF.sqlite_clean_db)
        self.useFixture(_DB_CACHE)

        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        self.mox = mox.Mox()
        self.stubs = stubout.StubOutForTesting()
        self.addCleanup(CONF.reset)
        self.addCleanup(self.mox.UnsetStubs)
        self.addCleanup(self.stubs.UnsetAll)
        self.addCleanup(self.stubs.SmartUnsetAll)
        self.addCleanup(self.mox.VerifyAll)
        self.addCleanup(self._common_cleanup)
        self.injected = []
        self._services = []

        fake_notifier.stub_notifier(self.stubs)

        CONF.set_override('fatal_exception_format_errors', True)
        # This will be cleaned up by the NestedTempfile fixture
        CONF.set_override('lock_path', tempfile.mkdtemp())
Example #35
    def test_init_notifications(self, messaging_mock):
        rpc.init(test.CONF)
        self.assertTrue(messaging_mock.JsonPayloadSerializer.called)
        self.assertTrue(messaging_mock.Notifier.called)
        self.assertEqual(rpc.NOTIFIER, messaging_mock.Notifier.return_value)
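
The messaging_mock parameter implies a mock.patch decorator that this listing cuts off. A typical framing (assumed, since the original decorator is not shown):

import mock

from cinder import rpc
from cinder import test


class RPCTestCase(test.TestCase):
    @mock.patch('cinder.rpc.messaging')
    def test_init_notifications(self, messaging_mock):
        rpc.init(test.CONF)
        self.assertTrue(messaging_mock.Notifier.called)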
Example #36
    def __init__(self, host, binary, topic, manager, report_interval=None,
                 periodic_interval=None, periodic_fuzzy_delay=None,
                 service_name=None, coordination=False, cluster=None, *args,
                 **kwargs):
        super(Service, self).__init__()

        if not rpc.initialized():
            rpc.init(CONF)

        self.cluster = cluster
        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        self.coordination = coordination
        manager_class = importutils.import_class(self.manager_class_name)
        if CONF.profiler.enabled:
            manager_class = profiler.trace_cls("rpc")(manager_class)

        # NOTE(geguileo): We need to create the Service DB entry before we
        # create the manager, otherwise capped versions for serializer and rpc
        # client would use existing DB entries not including us, which could
        # result in us using None (if it's the first time the service is run)
        # or an old version (if this is a normal upgrade of a single service).
        ctxt = context.get_admin_context()
        self.is_upgrading_to_n = self.is_svc_upgrading_to_n(binary)
        try:
            service_ref = objects.Service.get_by_args(ctxt, host, binary)
            service_ref.rpc_current_version = manager_class.RPC_API_VERSION
            obj_version = objects_base.OBJ_VERSIONS.get_current()
            service_ref.object_current_version = obj_version
            # TODO(geguileo): In O we can remove the service upgrading part on
            # the next equation, because by then all our services will be
            # properly setting the cluster during volume migrations since
            # they'll have the new Volume ORM model.  But until then we can
            # only set the cluster in the DB and pass added_to_cluster to
            # init_host when we have completed the rolling upgrade from M to N.

            # added_to_cluster attribute marks when we consider that we have
            # just added a host to a cluster so we can include resources into
            # that cluster.  We consider that we have added the host when we
            # didn't have data in the cluster DB field and our current
            # configuration has a cluster value.  We don't want to do anything
            # automatic if the cluster is changed; in those cases we'll want
            # to use the cinder manage command and do it manually.
            self.added_to_cluster = (not service_ref.cluster_name and cluster
                                     and not self.is_upgrading_to_n)

            # TODO(geguileo): In O - Remove self.is_upgrading_to_n part
            if (service_ref.cluster_name != cluster and
                    not self.is_upgrading_to_n):
                LOG.info(_LI('This service has been moved from cluster '
                             '%(cluster_svc)s to %(cluster_cfg)s. Resources '
                             'will %(opt_no)sbe moved to the new cluster'),
                         {'cluster_svc': service_ref.cluster_name,
                          'cluster_cfg': cluster,
                          'opt_no': '' if self.added_to_cluster else 'NO '})

            if self.added_to_cluster:
                # We copy the service's disabled status to the cluster if we
                # have to create it.
                self._ensure_cluster_exists(ctxt, service_ref.disabled)
                service_ref.cluster_name = cluster
            service_ref.save()
            Service.service_id = service_ref.id
        except exception.NotFound:
            # We don't want to include cluster information on the service or
            # create the cluster entry if we are upgrading.
            self._create_service_ref(ctxt, manager_class.RPC_API_VERSION)
            # TODO(geguileo): In O set added_to_cluster to True
            # We don't want to include resources in the cluster during the
            # start while we are still doing the rolling upgrade.
            self.added_to_cluster = not self.is_upgrading_to_n

        self.manager = manager_class(host=self.host,
                                     cluster=self.cluster,
                                     service_name=service_name,
                                     *args, **kwargs)
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.basic_config_check()
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []

        setup_profiler(binary, host)
        self.rpcserver = None
        self.cluster_rpcserver = None
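
Both Service variants in this listing open with the same idempotency guard. A sketch of the state that guard presumably checks (a module-level transport, following the usual oslo.messaging pattern; the real cinder.rpc internals are an assumption here):

import oslo_messaging as messaging

TRANSPORT = None


def initialized():
    # True once init() has built the process-wide transport.
    return TRANSPORT is not None


def init(conf):
    global TRANSPORT
    TRANSPORT = messaging.get_rpc_transport(conf)

This guard is what lets several Service objects in one process (as in the cinder-all launcher at the end of this listing) share a single transport instead of each building their own.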
Example #38
    def __init__(self, host, binary, topic, manager, report_interval=None,
                 periodic_interval=None, periodic_fuzzy_delay=None,
                 service_name=None, coordination=False, cluster=None, *args,
                 **kwargs):
        super(Service, self).__init__()

        if not rpc.initialized():
            rpc.init(CONF)

        self.cluster = cluster
        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        self.coordination = coordination
        manager_class = importutils.import_class(self.manager_class_name)
        if CONF.profiler.enabled:
            manager_class = profiler.trace_cls("rpc")(manager_class)

        self.service = None
        self.manager = manager_class(host=self.host,
                                     cluster=self.cluster,
                                     service_name=service_name,
                                     *args, **kwargs)
        self.availability_zone = self.manager.availability_zone

        # NOTE(geguileo): We need to create the Service DB entry before we
        # create the manager, otherwise capped versions for serializer and rpc
        # client would use existing DB entries not including us, which could
        # result in us using None (if it's the first time the service is run)
        # or an old version (if this is a normal upgrade of a single service).
        ctxt = context.get_admin_context()
        try:
            service_ref = objects.Service.get_by_args(ctxt, host, binary)
            service_ref.rpc_current_version = manager_class.RPC_API_VERSION
            obj_version = objects_base.OBJ_VERSIONS.get_current()
            service_ref.object_current_version = obj_version

            # added_to_cluster attribute marks when we consider that we have
            # just added a host to a cluster so we can include resources into
            # that cluster.  We consider that we have added the host when we
            # didn't have data in the cluster DB field and our current
            # configuration has a cluster value.  We don't want to do anything
            # automatic if the cluster is changed; in those cases we'll want
            # to use the cinder manage command and do it manually.
            self.added_to_cluster = (not service_ref.cluster_name and cluster)

            if service_ref.cluster_name != cluster:
                LOG.info('This service has been moved from cluster '
                         '%(cluster_svc)s to %(cluster_cfg)s. Resources '
                         'will %(opt_no)sbe moved to the new cluster',
                         {'cluster_svc': service_ref.cluster_name,
                          'cluster_cfg': cluster,
                          'opt_no': '' if self.added_to_cluster else 'NO '})

            if self.added_to_cluster:
                # We copy the service's disabled status to the cluster if we
                # have to create it.
                self._ensure_cluster_exists(ctxt, service_ref)
                service_ref.cluster_name = cluster
            service_ref.save()
            Service.service_id = service_ref.id
            self.origin_service_id = service_ref.id
        except exception.NotFound:
            self._create_service_ref(ctxt, manager_class.RPC_API_VERSION)
            # Service entry didn't exist because it was manually removed or
            # it's the first time running; to be on the safe side we say we
            # were added if we are clustered.
            self.added_to_cluster = bool(cluster)

        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.basic_config_check()
        self.saved_args, self.saved_kwargs = args, kwargs

        setup_profiler(binary, host)
        self.rpcserver = None
        self.backend_rpcserver = None
        self.cluster_rpcserver = None
Example #39
def main():
    objects.register_all()
    admin_context = context.get_admin_context()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    LOG = logging.getLogger("cinder")
    rpc.init(CONF)
    begin, end = utils.last_completed_audit_period()
    if CONF.start_time:
        begin = datetime.datetime.strptime(CONF.start_time,
                                           "%Y-%m-%d %H:%M:%S")
    if CONF.end_time:
        end = datetime.datetime.strptime(CONF.end_time,
                                         "%Y-%m-%d %H:%M:%S")
    if not end > begin:
        msg = _("The end time (%(end)s) must be after the start "
                "time (%(start)s).") % {'start': begin,
                                        'end': end}
        LOG.error(msg)
        sys.exit(-1)
    LOG.debug("Starting volume usage audit")
    msg = _("Creating usages for %(begin_period)s until %(end_period)s")
    LOG.debug(msg, {"begin_period": str(begin), "end_period": str(end)})

    extra_info = {
        'audit_period_beginning': str(begin),
        'audit_period_ending': str(end),
    }

    volumes = db.volume_get_active_by_window(admin_context,
                                             begin,
                                             end)
    LOG.debug("Found %d volumes"), len(volumes)
    for volume_ref in volumes:
        try:
            LOG.debug("Send exists notification for <volume_id: "
                      "%(volume_id)s> <project_id %(project_id)s> "
                      "<%(extra_info)s>",
                      {'volume_id': volume_ref.id,
                       'project_id': volume_ref.project_id,
                       'extra_info': extra_info})
            cinder.volume.utils.notify_about_volume_usage(
                admin_context,
                volume_ref,
                'exists', extra_usage_info=extra_info)
        except Exception as exc_msg:
            LOG.exception(_LE("Exists volume notification failed: %s"),
                          exc_msg, resource=volume_ref)

        if (CONF.send_actions and
                volume_ref.created_at > begin and
                volume_ref.created_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(volume_ref.created_at),
                    'audit_period_ending': str(volume_ref.created_at),
                }
                LOG.debug("Send create notification for "
                          "<volume_id: %(volume_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>",
                          {'volume_id': volume_ref.id,
                           'project_id': volume_ref.project_id,
                           'extra_info': local_extra_info})
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'create.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'create.end', extra_usage_info=local_extra_info)
            except Exception as exc_msg:
                LOG.exception(_LE("Create volume notification failed: %s"),
                              exc_msg, resource=volume_ref)

        if (CONF.send_actions and volume_ref.deleted_at and
                volume_ref.deleted_at > begin and
                volume_ref.deleted_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(volume_ref.deleted_at),
                    'audit_period_ending': str(volume_ref.deleted_at),
                }
                LOG.debug("Send delete notification for "
                          "<volume_id: %(volume_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>",
                          {'volume_id': volume_ref.id,
                           'project_id': volume_ref.project_id,
                           'extra_info': local_extra_info})
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'delete.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'delete.end', extra_usage_info=local_extra_info)
            except Exception as exc_msg:
                LOG.exception(_LE("Delete volume notification failed: %s"),
                              exc_msg, resource=volume_ref)

    snapshots = objects.SnapshotList.get_active_by_window(admin_context,
                                                          begin, end)
    LOG.debug("Found %d snapshots"), len(snapshots)
    for snapshot_ref in snapshots:
        try:
            LOG.debug("Send notification for <snapshot_id: %(snapshot_id)s> "
                      "<project_id %(project_id)s> <%(extra_info)s>",
                      {'snapshot_id': snapshot_ref.id,
                       'project_id': snapshot_ref.project_id,
                       'extra_info': extra_info})
            cinder.volume.utils.notify_about_snapshot_usage(admin_context,
                                                            snapshot_ref,
                                                            'exists',
                                                            extra_info)
        except Exception as exc_msg:
            LOG.exception(_LE("Exists snapshot notification failed: %s"),
                          exc_msg, resource=snapshot_ref)

        if (CONF.send_actions and
                snapshot_ref.created_at > begin and
                snapshot_ref.created_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(snapshot_ref.created_at),
                    'audit_period_ending': str(snapshot_ref.created_at),
                }
                LOG.debug("Send create notification for "
                          "<snapshot_id: %(snapshot_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>",
                          {'snapshot_id': snapshot_ref.id,
                           'project_id': snapshot_ref.project_id,
                           'extra_info': local_extra_info})
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'create.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'create.end', extra_usage_info=local_extra_info)
            except Exception as exc_msg:
                LOG.exception(_LE("Create snapshot notification failed: %s"),
                              exc_msg, resource=snapshot_ref)

        if (CONF.send_actions and snapshot_ref.deleted_at and
                snapshot_ref.deleted_at > begin and
                snapshot_ref.deleted_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(snapshot_ref.deleted_at),
                    'audit_period_ending': str(snapshot_ref.deleted_at),
                }
                LOG.debug("Send delete notification for "
                          "<snapshot_id: %(snapshot_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>",
                          {'snapshot_id': snapshot_ref.id,
                           'project_id': snapshot_ref.project_id,
                           'extra_info': local_extra_info})
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'delete.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'delete.end', extra_usage_info=local_extra_info)
            except Exception as exc_msg:
                LOG.exception(_LE("Delete snapshot notification failed: %s"),
                              exc_msg, resource=snapshot_ref)

    LOG.debug("Volume usage audit completed")
Example #40
    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()

        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        if (os.environ.get('OS_STDOUT_CAPTURE') == 'True'
                or os.environ.get('OS_STDOUT_CAPTURE') == '1'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if (os.environ.get('OS_STDERR_CAPTURE') == 'True'
                or os.environ.get('OS_STDERR_CAPTURE') == '1'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

        self.log_fixture = self.useFixture(
            fixtures.FakeLogger(level=logging.DEBUG))

        rpc.add_extra_exmods("cinder.tests")
        self.addCleanup(rpc.clear_extra_exmods)
        self.addCleanup(rpc.cleanup)

        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = 'fake'
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)
        rpc.init(CONF)

        conf_fixture.set_defaults(CONF)
        CONF([], default_config_files=[])

        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()

        CONF.set_default('connection', 'sqlite://', 'database')
        CONF.set_default('sqlite_synchronous', False)

        self.log_fixture = self.useFixture(fixtures.FakeLogger())

        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(session,
                                 migration,
                                 sql_connection=CONF.database.connection,
                                 sqlite_db=CONF.sqlite_db,
                                 sqlite_clean_db=CONF.sqlite_clean_db)
        self.useFixture(_DB_CACHE)

        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        self.mox = mox.Mox()
        self.stubs = stubout.StubOutForTesting()
        self.addCleanup(CONF.reset)
        self.addCleanup(self.mox.UnsetStubs)
        self.addCleanup(self.stubs.UnsetAll)
        self.addCleanup(self.stubs.SmartUnsetAll)
        self.addCleanup(self.mox.VerifyAll)
        self.addCleanup(self._common_cleanup)
        self.injected = []
        self._services = []

        fake_notifier.stub_notifier(self.stubs)

        CONF.set_override('fatal_exception_format_errors', True)
        # This will be cleaned up by the NestedTempfile fixture
        CONF.set_override('lock_path', tempfile.mkdtemp())
Example #41
File: test.py Project: joshw/cinder
    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()

        # Create default notifier
        self.notifier = fake_notifier.get_fake_notifier()

        # Mock rpc get notifier with fake notifier method that joins all
        # notifications with the default notifier
        self.patch('cinder.rpc.get_notifier',
                   side_effect=self._get_joined_notifier)

        if self.MOCK_WORKER:
            # Mock worker creation for all tests that don't care about it
            clean_path = 'cinder.objects.cleanable.CinderCleanableObject.%s'
            for method in ('create_worker', 'set_worker', 'unset_worker'):
                self.patch(clean_path % method, return_value=None)

        if self.MOCK_TOOZ:
            self.patch('cinder.coordination.Coordinator.start')
            self.patch('cinder.coordination.Coordinator.stop')
            self.patch('cinder.coordination.Coordinator.get_lock')

        # Unit tests do not need to use lazy gettext
        i18n.enable_lazy(False)

        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        environ_enabled = (lambda var_name: strutils.bool_from_string(
            os.environ.get(var_name)))
        if environ_enabled('OS_STDOUT_CAPTURE'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if environ_enabled('OS_STDERR_CAPTURE'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

        self.useFixture(log_fixture.get_logging_handle_error_fixture())
        self.useFixture(cinder_fixtures.StandardLogging())

        rpc.add_extra_exmods("cinder.tests.unit")
        self.addCleanup(rpc.clear_extra_exmods)
        self.addCleanup(rpc.cleanup)

        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_url = 'fake:/'
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)

        # Load oslo_messaging_notifications config group so we can set an
        # override to prevent notifications from being ignored due to the
        # short-circuit mechanism.
        oslo_messaging.get_notification_transport(CONF)
        # We need to use a valid driver for the notifications, so we use test.
        self.override_config('driver', ['test'],
                             group='oslo_messaging_notifications')
        rpc.init(CONF)

        # NOTE(geguileo): This is required because _determine_obj_version_cap
        # and _determine_rpc_version_cap functions in cinder.rpc.RPCAPI cache
        # versions in LAST_RPC_VERSIONS and LAST_OBJ_VERSIONS so we may have
        # weird interactions between tests if we don't clear them before each
        # test.
        rpc.LAST_OBJ_VERSIONS = {}
        rpc.LAST_RPC_VERSIONS = {}

        # Init AuthProtocol to register some base options first, such as
        # auth_url.
        auth_token.AuthProtocol('fake_app', {
            'auth_type': 'password',
            'auth_url': 'fake_url'
        })

        conf_fixture.set_defaults(CONF)
        CONF([], default_config_files=[])

        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()

        CONF.set_default('connection', 'sqlite://', 'database')
        CONF.set_default('sqlite_synchronous', False, 'database')

        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(sqla_api,
                                 migration,
                                 sql_connection=CONF.database.connection)
        self.useFixture(_DB_CACHE)

        # NOTE(blk-u): WarningsFixture must be after the Database fixture
        # because sqlalchemy-migrate messes with the warnings filters.
        self.useFixture(cinder_fixtures.WarningsFixture())

        # NOTE(danms): Make sure to reset us back to non-remote objects
        # for each test to avoid interactions. Also, backup the object
        # registry.
        objects_base.CinderObject.indirection_api = None
        self._base_test_obj_backup = copy.copy(
            objects_base.CinderObjectRegistry._registry._obj_classes)
        self.addCleanup(self._restore_obj_registry)

        self.addCleanup(CONF.reset)
        self.addCleanup(self._common_cleanup)
        self.injected = []
        self._services = []

        fake_notifier.mock_notifier(self)

        # This will be cleaned up by the NestedTempfile fixture
        lock_path = self.useFixture(fixtures.TempDir()).path
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(lock_path=lock_path, group='oslo_concurrency')
        lockutils.set_defaults(lock_path)
        self.override_config('policy_file',
                             os.path.join(
                                 os.path.abspath(
                                     os.path.join(
                                         os.path.dirname(__file__),
                                         '..',
                                     )), self.POLICY_PATH),
                             group='oslo_policy')
        self.override_config(
            'resource_query_filters_file',
            os.path.join(
                os.path.abspath(os.path.join(
                    os.path.dirname(__file__),
                    '..',
                )), self.RESOURCE_FILTER_PATH))
        self._disable_osprofiler()

        # NOTE(geguileo): This is required because common get_by_id method in
        # cinder.db.sqlalchemy.api caches get methods and if we use a mocked
        # get method in one test it would carry on to the next test.  So we
        # clear out the cache.
        sqla_api._GET_METHODS = {}

        self.override_config('backend_url',
                             'file://' + lock_path,
                             group='coordination')
        coordination.COORDINATOR.start()
        self.addCleanup(coordination.COORDINATOR.stop)

        if six.PY3:
            # TODO(smcginnis) Python 3 deprecates assertRaisesRegexp to
            # assertRaisesRegex, but Python 2 does not have the new name. This
            # can be removed once we stop supporting py2 or the new name is
            # added.
            self.assertRaisesRegexp = self.assertRaisesRegex

        # Ensure we have the default tpool size value and we don't carry
        # threads from other test runs.
        tpool.killall()
        tpool._nthreads = 20

        # NOTE(mikal): make sure we don't load a privsep helper accidentally
        self.useFixture(cinder_fixtures.PrivsepNoHelperFixture())
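
Unlike the earlier setUps, this revision selects the in-memory backend through transport_url rather than transport_driver. Both spellings point oslo.messaging at its fake driver; a minimal sketch assuming oslo.messaging's public ConfFixture API:

from oslo_config import cfg
from oslo_messaging import conffixture

conf = cfg.ConfigOpts()
fix = conffixture.ConfFixture(conf)
fix.transport_url = 'fake:/'      # form used in this example
# fix.transport_driver = 'fake'   # form used in the earlier setUps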
Example #42
def main():
    objects.register_all()
    admin_context = context.get_admin_context()
    CONF(sys.argv[1:], project='cinder', version=version.version_string())
    logging.setup(CONF, "cinder")
    LOG = logging.getLogger("cinder")
    rpc.init(CONF)
    begin, end = utils.last_completed_audit_period()
    if CONF.start_time:
        begin = datetime.datetime.strptime(CONF.start_time,
                                           "%Y-%m-%d %H:%M:%S")
    if CONF.end_time:
        end = datetime.datetime.strptime(CONF.end_time, "%Y-%m-%d %H:%M:%S")
    if not end > begin:
        msg = _("The end time (%(end)s) must be after the start "
                "time (%(start)s).") % {
                    'start': begin,
                    'end': end
                }
        LOG.error(msg)
        sys.exit(-1)
    LOG.debug("Starting volume usage audit")
    msg = _("Creating usages for %(begin_period)s until %(end_period)s")
    LOG.debug(msg, {"begin_period": str(begin), "end_period": str(end)})

    extra_info = {
        'audit_period_beginning': str(begin),
        'audit_period_ending': str(end),
    }

    volumes = db.volume_get_active_by_window(admin_context, begin, end)
    LOG.debug("Found %d volumes", len(volumes))
    for volume_ref in volumes:
        try:
            LOG.debug(
                "Send exists notification for <volume_id: "
                "%(volume_id)s> <project_id %(project_id)s> "
                "<%(extra_info)s>", {
                    'volume_id': volume_ref.id,
                    'project_id': volume_ref.project_id,
                    'extra_info': extra_info
                })
            cinder.volume.utils.notify_about_volume_usage(
                admin_context,
                volume_ref,
                'exists',
                extra_usage_info=extra_info)
        except Exception as exc_msg:
            LOG.exception(_LE("Exists volume notification failed: %s"),
                          exc_msg,
                          resource=volume_ref)

        if (CONF.send_actions and volume_ref.created_at > begin
                and volume_ref.created_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(volume_ref.created_at),
                    'audit_period_ending': str(volume_ref.created_at),
                }
                LOG.debug(
                    "Send create notification for "
                    "<volume_id: %(volume_id)s> "
                    "<project_id %(project_id)s> <%(extra_info)s>", {
                        'volume_id': volume_ref.id,
                        'project_id': volume_ref.project_id,
                        'extra_info': local_extra_info
                    })
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'create.start',
                    extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'create.end',
                    extra_usage_info=local_extra_info)
            except Exception as exc_msg:
                LOG.exception(_LE("Create volume notification failed: %s"),
                              exc_msg,
                              resource=volume_ref)

        if (CONF.send_actions and volume_ref.deleted_at
                and volume_ref.deleted_at > begin
                and volume_ref.deleted_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(volume_ref.deleted_at),
                    'audit_period_ending': str(volume_ref.deleted_at),
                }
                LOG.debug(
                    "Send delete notification for "
                    "<volume_id: %(volume_id)s> "
                    "<project_id %(project_id)s> <%(extra_info)s>", {
                        'volume_id': volume_ref.id,
                        'project_id': volume_ref.project_id,
                        'extra_info': local_extra_info
                    })
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'delete.start',
                    extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'delete.end',
                    extra_usage_info=local_extra_info)
            except Exception as exc_msg:
                LOG.exception(_LE("Delete volume notification failed: %s"),
                              exc_msg,
                              resource=volume_ref)

    snapshots = objects.SnapshotList.get_active_by_window(
        admin_context, begin, end)
    LOG.debug("Found %d snapshots", len(snapshots))
    for snapshot_ref in snapshots:
        try:
            LOG.debug(
                "Send notification for <snapshot_id: %(snapshot_id)s> "
                "<project_id %(project_id)s> <%(extra_info)s>", {
                    'snapshot_id': snapshot_ref.id,
                    'project_id': snapshot_ref.project_id,
                    'extra_info': extra_info
                })
            cinder.volume.utils.notify_about_snapshot_usage(
                admin_context, snapshot_ref, 'exists', extra_info)
        except Exception as exc_msg:
            LOG.exception(_LE("Exists snapshot notification failed: %s"),
                          exc_msg,
                          resource=snapshot_ref)

        if (CONF.send_actions and snapshot_ref.created_at > begin
                and snapshot_ref.created_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(snapshot_ref.created_at),
                    'audit_period_ending': str(snapshot_ref.created_at),
                }
                LOG.debug(
                    "Send create notification for "
                    "<snapshot_id: %(snapshot_id)s> "
                    "<project_id %(project_id)s> <%(extra_info)s>", {
                        'snapshot_id': snapshot_ref.id,
                        'project_id': snapshot_ref.project_id,
                        'extra_info': local_extra_info
                    })
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'create.start',
                    extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'create.end',
                    extra_usage_info=local_extra_info)
            except Exception as exc_msg:
                LOG.exception(_LE("Create snapshot notification failed: %s"),
                              exc_msg,
                              resource=snapshot_ref)

        if (CONF.send_actions and snapshot_ref.deleted_at
                and snapshot_ref.deleted_at > begin
                and snapshot_ref.deleted_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(snapshot_ref.deleted_at),
                    'audit_period_ending': str(snapshot_ref.deleted_at),
                }
                LOG.debug(
                    "Send delete notification for "
                    "<snapshot_id: %(snapshot_id)s> "
                    "<project_id %(project_id)s> <%(extra_info)s>", {
                        'snapshot_id': snapshot_ref.id,
                        'project_id': snapshot_ref.project_id,
                        'extra_info': local_extra_info
                    })
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'delete.start',
                    extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'delete.end',
                    extra_usage_info=local_extra_info)
            except Exception as exc_msg:
                LOG.exception(_LE("Delete snapshot notification failed: %s"),
                              exc_msg,
                              resource=snapshot_ref)

    LOG.debug("Volume usage audit completed")
Example #43
    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()

        # Create default notifier
        self.notifier = fake_notifier.get_fake_notifier()

        # Mock rpc get notifier with fake notifier method that joins all
        # notifications with the default notifier
        p = mock.patch('cinder.rpc.get_notifier',
                       side_effect=self._get_joined_notifier)
        p.start()

        # Unit tests do not need to use lazy gettext
        i18n.enable_lazy(False)

        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        environ_enabled = (lambda var_name:
                           strutils.bool_from_string(os.environ.get(var_name)))
        if environ_enabled('OS_STDOUT_CAPTURE'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if environ_enabled('OS_STDERR_CAPTURE'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

        self.useFixture(log_fixture.get_logging_handle_error_fixture())
        self.useFixture(cinder_fixtures.StandardLogging())

        rpc.add_extra_exmods("cinder.tests.unit")
        self.addCleanup(rpc.clear_extra_exmods)
        self.addCleanup(rpc.cleanup)

        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = 'fake'
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)
        rpc.init(CONF)

        # NOTE(geguileo): This is required because _determine_obj_version_cap
        # and _determine_rpc_version_cap functions in cinder.rpc.RPCAPI cache
        # versions in LAST_RPC_VERSIONS and LAST_OBJ_VERSIONS so we may have
        # weird interactions between tests if we don't clear them before each
        # test.
        rpc.LAST_OBJ_VERSIONS = {}
        rpc.LAST_RPC_VERSIONS = {}

        conf_fixture.set_defaults(CONF)
        CONF([], default_config_files=[])

        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()

        CONF.set_default('connection', 'sqlite://', 'database')
        CONF.set_default('sqlite_synchronous', False, 'database')

        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(sqla_api, migration,
                                 sql_connection=CONF.database.connection,
                                 sqlite_db=CONF.database.sqlite_db,
                                 sqlite_clean_db='clean.sqlite')
        self.useFixture(_DB_CACHE)

        # NOTE(danms): Make sure to reset us back to non-remote objects
        # for each test to avoid interactions. Also, backup the object
        # registry.
        objects_base.CinderObject.indirection_api = None
        self._base_test_obj_backup = copy.copy(
            objects_base.CinderObjectRegistry._registry._obj_classes)
        self.addCleanup(self._restore_obj_registry)

        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        mox_fixture = self.useFixture(moxstubout.MoxStubout())
        self.mox = mox_fixture.mox
        self.stubs = mox_fixture.stubs
        self.addCleanup(CONF.reset)
        self.addCleanup(self._common_cleanup)
        self.injected = []
        self._services = []

        fake_notifier.stub_notifier(self.stubs)

        self.override_config('fatal_exception_format_errors', True)
        # This will be cleaned up by the NestedTempfile fixture
        lock_path = self.useFixture(fixtures.TempDir()).path
        self.fixture = self.useFixture(
            config_fixture.Config(lockutils.CONF))
        self.fixture.config(lock_path=lock_path,
                            group='oslo_concurrency')
        lockutils.set_defaults(lock_path)
        self.override_config('policy_file',
                             os.path.join(
                                 os.path.abspath(
                                     os.path.join(
                                         os.path.dirname(__file__),
                                         '..',
                                     )
                                 ),
                                 'cinder/tests/unit/policy.json'),
                             group='oslo_policy')

        self._disable_osprofiler()

        # NOTE(geguileo): This is required because common get_by_id method in
        # cinder.db.sqlalchemy.api caches get methods and if we use a mocked
        # get method in one test it would carry on to the next test.  So we
        # clear out the cache.
        sqla_api._GET_METHODS = {}
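
One quirk of this older revision: the get_notifier patch is started by hand and never stopped, unlike the later setUps that go through self.patch(). A suggested completion (not part of the original) registers the stop as a cleanup:

# Inside setUp(); the snippet above omits the cleanup step.
p = mock.patch('cinder.rpc.get_notifier',
               side_effect=self._get_joined_notifier)
p.start()
self.addCleanup(p.stop)  # undo the patch when the test finishes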
Example #44
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder', version=version.version_string())
    config.set_middleware_defaults()
    logging.setup(CONF, "cinder")
    LOG = logging.getLogger('cinder.all')
    versionutils.report_deprecated_feature(
        LOG,
        _('cinder-all is deprecated in Newton and will be removed in Ocata.'))

    utils.monkey_patch()

    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)

    rpc.init(CONF)

    launcher = service.process_launcher()
    # cinder-api
    try:
        server = service.WSGIService('osapi_volume')
        launcher.launch_service(server, workers=server.workers or 1)
    except (Exception, SystemExit):
        LOG.exception(_LE('Failed to load osapi_volume'))

    for binary in ['cinder-scheduler', 'cinder-backup']:
        try:
            launcher.launch_service(service.Service.create(binary=binary))
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s'), binary)

    # cinder-volume
    try:
        if CONF.enabled_backends:
            for backend in filter(None, CONF.enabled_backends):
                CONF.register_opt(volume_cmd.host_opt, group=backend)
                backend_host = getattr(CONF, backend).backend_host
                host = "%s@%s" % (backend_host or CONF.host, backend)
                server = service.Service.create(host=host,
                                                service_name=backend,
                                                binary='cinder-volume',
                                                coordination=True)
                # Dispose of the whole DB connection pool here before
                # starting another process.  Otherwise we run into cases
                # where child processes share DB connections which results
                # in errors.
                session.dispose_engine()
                launcher.launch_service(server)
        else:
            LOG.warning(
                _LW('Configuration for cinder-volume does not specify '
                    '"enabled_backends", using DEFAULT as backend. '
                    'Support for DEFAULT section to configure drivers '
                    'will be removed in the next release.'))
            server = service.Service.create(binary='cinder-volume',
                                            coordination=True)
            launcher.launch_service(server)
    except (Exception, SystemExit):
        LOG.exception(_LE('Failed to load cinder-volume'))

    launcher.wait()
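
The host string assembled for each backend follows Cinder's host@backend convention, which is how per-backend services are identified. A toy illustration (values are hypothetical):

backend = 'lvm1'
backend_host = None          # per-backend backend_host left unset
default_host = 'node-1'      # stand-in for CONF.host
host = "%s@%s" % (backend_host or default_host, backend)
assert host == 'node-1@lvm1'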