Example #1
def main():
    """Parse options and call the appropriate class/method."""
    objects.register_all()
    CONF.register_cli_opt(category_opt)
    script_name = sys.argv[0]
    if len(sys.argv) < 2:
        print(_("\nOpenStack Cinder version: %(version)s\n") %
              {'version': version.version_string()})
        print(script_name + " category action [<args>]")
        print(_("Available categories:"))
        for category in CATEGORIES:
            print(_("\t%s") % category)
        sys.exit(2)

    try:
        CONF(sys.argv[1:], project='cinder',
             version=version.version_string())
        logging.setup(CONF, "cinder")
        python_logging.captureWarnings(True)
    except cfg.ConfigDirNotFoundError as details:
        print(_("Invalid directory: %s") % details)
        sys.exit(2)
    except cfg.ConfigFilesNotFoundError as e:
        cfg_files = e.config_files
        print(_("Failed to read configuration file(s): %s") % cfg_files)
        sys.exit(2)

    fn = CONF.category.action_fn
    fn_kwargs = fetch_func_args(fn)
    fn(**fn_kwargs)
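
For reference, a typical invocation satisfying the "category action [<args>]" contract parsed above (the concrete pair is an assumption based on standard cinder-manage usage):

# Hypothetical shell session:
#   cinder-manage db sync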
Example #2
def main():
    """Parse options and call the appropriate class/method."""
    CONF.register_cli_opt(category_opt)
    script_name = sys.argv[0]
    if len(sys.argv) < 2:
        print(_("\nOpenStack Cinder version: %(version)s\n") %
              {'version': version.version_string()})
        print(script_name + " category action [<args>]")
        print(_("Available categories:"))
        for category in CATEGORIES:
            print(_("\t%s") % category)
        sys.exit(2)

    try:
        CONF(sys.argv[1:], project='cinder',
             version=version.version_string())
        logging.setup(CONF, "cinder")
    except cfg.ConfigFilesNotFoundError:
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
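        # If the last config file exists but is unreadable by the current
        # user, re-exec the entire command via sudo as the file's owner
        # ('#<uid>' tells sudo to use a numeric uid).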
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
            except Exception:
                print(_('sudo failed, continuing as if nothing happened'))

        print(_('Please re-run cinder-manage as root.'))
        sys.exit(2)

    fn = CONF.category.action_fn
    fn_args = fetch_func_args(fn)
    fn(*fn_args)
Example #3
def main():
    objects.register_all()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version)
    launcher = service.get_launcher()
    if CONF.enabled_backends:
        for backend in CONF.enabled_backends:
            CONF.register_opt(host_opt, group=backend)
            backend_host = getattr(CONF, backend).backend_host
            host = "%s@%s" % (backend_host or CONF.host, backend)
            server = service.Service.create(host=host,
                                            service_name=backend,
                                            binary='cinder-volume')
            # Dispose of the whole DB connection pool here before
            # starting another process.  Otherwise we run into cases where
            # child processes share DB connections which results in errors.
            session.dispose_engine()
            launcher.launch_service(server)
    else:
        server = service.Service.create(binary='cinder-volume')
        launcher.launch_service(server)
    launcher.wait()
Example #4
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.set_defaults(
        default_log_levels=logging.get_default_log_levels() +
        _EXTRA_DEFAULT_LOG_LEVELS)
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    global LOG
    LOG = logging.getLogger(__name__)

    if CONF.backup_workers > 1:
        LOG.info('Backup running with %s processes.', CONF.backup_workers)
        launcher = service.get_launcher()

        for i in range(CONF.backup_workers):
            _launch_backup_process(launcher, i)

        launcher.wait()
    else:
        LOG.info('Backup running in single process mode.')
        server = service.Service.create(binary='cinder-backup',
                                        coordination=True,
                                        process_number=1)
        service.serve(server)
        service.wait()
Example #5
def main():
    CONF(sys.argv[1:], project="cinder", version=version.version_string())
    logging.setup("cinder")
    utils.monkey_patch()
    server = service.Service.create(binary="cinder-backup")
    service.serve(server)
    service.wait()
Example #6
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    global LOG
    LOG = logging.getLogger(__name__)

    if not CONF.enabled_backends:
        LOG.error('Configuration for cinder-volume does not specify '
                  '"enabled_backends". Using DEFAULT section to configure '
                  'drivers is not supported since Ocata.')
        sys.exit(1)

    if os.name == 'nt':
        # We cannot use oslo.service to spawn multiple services on Windows.
        # It relies on forking, which is not available on Windows.
        # Furthermore, service objects are unmarshallable objects that are
        # passed to subprocesses.
        _launch_services_win32()
    else:
        _launch_services_posix()
Example #7
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project="cinder", version=version.version_string())
    config.set_middleware_defaults()
    logging.setup(CONF, "cinder")
    LOG = logging.getLogger("cinder.all")
    versionutils.report_deprecated_feature(LOG, _("cinder-all is deprecated in Newton and will be removed in Ocata."))

    utils.monkey_patch()

    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)

    rpc.init(CONF)

    launcher = service.process_launcher()
    # cinder-api
    try:
        server = service.WSGIService("osapi_volume")
        launcher.launch_service(server, workers=server.workers or 1)
    except (Exception, SystemExit):
        LOG.exception(_LE("Failed to load osapi_volume"))

    for binary in ["cinder-scheduler", "cinder-backup"]:
        try:
            launcher.launch_service(service.Service.create(binary=binary))
        except (Exception, SystemExit):
            LOG.exception(_LE("Failed to load %s"), binary)

    # cinder-volume
    try:
        if CONF.enabled_backends:
            for backend in CONF.enabled_backends:
                CONF.register_opt(volume_cmd.host_opt, group=backend)
                backend_host = getattr(CONF, backend).backend_host
                host = "%s@%s" % (backend_host or CONF.host, backend)
                server = service.Service.create(
                    host=host, service_name=backend, binary="cinder-volume", coordination=True
                )
                # Dispose of the whole DB connection pool here before
                # starting another process.  Otherwise we run into cases
                # where child processes share DB connections which results
                # in errors.
                session.dispose_engine()
                launcher.launch_service(server)
        else:
            LOG.warning(
                _LW(
                    "Configuration for cinder-volume does not specify "
                    '"enabled_backends", using DEFAULT as backend. '
                    "Support for DEFAULT section to configure drivers "
                    "will be removed in the next release."
                )
            )
            server = service.Service.create(binary="cinder-volume", coordination=True)
            launcher.launch_service(server)
    except (Exception, SystemExit):
        LOG.exception(_LE("Failed to load cinder-volume"))

    launcher.wait()
Example #8
    def __init__(self, cinder_client):
        self.client = SSHClient('127.0.0.1', username='******')
        self.cinder_client = cinder_client

        self._NOVA_CONF = '/etc/nova/nova.conf'
        self._CINDER_CONF = '/etc/cinder/cinder.conf'
        self._is_openstack = ServiceManager.has_service(OSManager.get_openstack_cinder_service_name(), self.client)
        self._nova_installed = self.client.file_exists(self._NOVA_CONF)
        self._cinder_installed = self.client.file_exists(self._CINDER_CONF)
        self._driver_location = OSManager.get_openstack_package_base_path()
        self._openstack_users = OSManager.get_openstack_users()
        self._devstack_driver = '/opt/stack/cinder/cinder/volume/drivers/openvstorage.py'

        try:
            self._is_devstack = 'stack' in str(self.client.run('ps aux | grep SCREEN | grep stack | grep -v grep || true'))
        except SystemExit:  # ssh client raises system exit 1
            self._is_devstack = False
        except Exception:
            self._is_devstack = False

        try:
            from cinder import version
            version_string = version.version_string()
            if version_string.startswith('2015.2') or version_string.startswith('7.0'):
                self._stack_version = 'liberty'
            elif version_string.startswith('2015.1'):
                self._stack_version = 'kilo'
            elif version_string.startswith('2014.2'):
                self._stack_version = 'juno'
            else:
                raise ValueError('Unsupported cinder version: {0}'.format(version_string))
        except Exception as ex:
            raise ValueError('Cannot determine cinder version: {0}'.format(ex))
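
The chained startswith() checks above can be collapsed into a table-driven lookup; a minimal sketch under the same assumed version-to-release mapping:

from cinder import version

# Assumed prefix-to-release table mirroring the branches above.
_RELEASES = (('2015.2', 'liberty'), ('7.0', 'liberty'),
             ('2015.1', 'kilo'), ('2014.2', 'juno'))


def detect_stack_version():
    version_string = version.version_string()
    for prefix, release in _RELEASES:
        if version_string.startswith(prefix):
            return release
    raise ValueError('Unsupported cinder version: {0}'.format(version_string))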
Example #9
def main():
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    utils.monkey_patch()
    server = service.Service.create(binary='cinder-scheduler')
    service.serve(server)
    service.wait()
Example #10
def initialize_application():
    objects.register_all()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")

    rpc.init(CONF)
    return wsgi.Loader(CONF).load_app(name='osapi_volume')
Example #11
def initialize_application():
    objects.register_all()
    CONF(sys.argv[1:], project="cinder", version=version.version_string())
    logging.setup(CONF, "cinder")
    config.set_middleware_defaults()

    rpc.init(CONF)
    return wsgi.Loader(CONF).load_app(name="osapi_volume")
Example #12
def main():
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version)
    server = service.Service.create(binary='cinder-backup')
    service.serve(server)
    service.wait()
Example #13
    def start(self):
        version_string = version.version_string()
        LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'),
                 {'topic': self.topic, 'version_string': version_string})
        self.model_disconnected = False

        if self.coordination:
            coordination.COORDINATOR.start()

        self.manager.init_host(added_to_cluster=self.added_to_cluster)

        LOG.debug("Creating RPC server for service %s", self.topic)

        ctxt = context.get_admin_context()
        target = messaging.Target(topic=self.topic, server=self.host)
        endpoints = [self.manager]
        endpoints.extend(self.manager.additional_endpoints)
        obj_version_cap = objects.Service.get_minimum_obj_version(ctxt)
        LOG.debug("Pinning object versions for RPC server serializer to %s",
                  obj_version_cap)
        serializer = objects_base.CinderObjectSerializer(obj_version_cap)
        self.rpcserver = rpc.get_server(target, endpoints, serializer)
        self.rpcserver.start()

        # TODO(geguileo): In O - Remove the is_svc_upgrading_to_n part
        if self.cluster and not self.is_svc_upgrading_to_n(self.binary):
            LOG.info(_LI('Starting %(topic)s cluster %(cluster)s (version '
                         '%(version)s)'),
                     {'topic': self.topic, 'version': version_string,
                      'cluster': self.cluster})
            target = messaging.Target(topic=self.topic, server=self.cluster)
            serializer = objects_base.CinderObjectSerializer(obj_version_cap)
            self.cluster_rpcserver = rpc.get_server(target, endpoints,
                                                    serializer)
            self.cluster_rpcserver.start()

        self.manager.init_host_with_rpc()

        if self.report_interval:
            pulse = loopingcall.FixedIntervalLoopingCall(
                self.report_state)
            pulse.start(interval=self.report_interval,
                        initial_delay=self.report_interval)
            self.timers.append(pulse)

        if self.periodic_interval:
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            periodic = loopingcall.FixedIntervalLoopingCall(
                self.periodic_tasks)
            periodic.start(interval=self.periodic_interval,
                           initial_delay=initial_delay)
            self.timers.append(periodic)
Example #14
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    launcher = service.get_launcher()
    LOG = logging.getLogger(__name__)
    service_started = False

    if CONF.enabled_backends:
        for backend in filter(None, CONF.enabled_backends):
            CONF.register_opt(host_opt, group=backend)
            backend_host = getattr(CONF, backend).backend_host
            host = "%s@%s" % (backend_host or CONF.host, backend)
            # We also want to set cluster to None on empty strings, and we
            # ignore leading and trailing spaces.
            cluster = CONF.cluster and CONF.cluster.strip()
            cluster = (cluster or None) and '%s@%s' % (cluster, backend)
            try:
                server = service.Service.create(host=host,
                                                service_name=backend,
                                                binary='cinder-volume',
                                                coordination=True,
                                                cluster=cluster)
            except Exception:
                msg = _('Volume service %s failed to start.') % host
                LOG.exception(msg)
            else:
                # Dispose of the whole DB connection pool here before
                # starting another process.  Otherwise we run into cases where
                # child processes share DB connections which results in errors.
                session.dispose_engine()
                launcher.launch_service(server)
                service_started = True
    else:
        LOG.warning(_LW('Configuration for cinder-volume does not specify '
                        '"enabled_backends", using DEFAULT as backend. '
                        'Support for DEFAULT section to configure drivers '
                        'will be removed in the next release.'))
        server = service.Service.create(binary='cinder-volume',
                                        coordination=True,
                                        cluster=CONF.cluster)
        launcher.launch_service(server)
        service_started = True

    if not service_started:
        msg = _('No volume service(s) started successfully, terminating.')
        LOG.error(msg)
        sys.exit(1)

    launcher.wait()
Example #15
def main():
    objects.register_all()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version)
    server = service.Service.create(binary='cinder-scheduler')
    service.serve(server)
    service.wait()
Example #16
def main():
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup("cinder")
    utils.monkey_patch()

    rpc.init(CONF)
    launcher = service.process_launcher()
    server = service.WSGIService('osapi_volume')
    launcher.launch_service(server, workers=server.workers)
    launcher.wait()
Example #17
def main():
    objects.register_all()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version)
    server = service.Service.create(binary='cinder-backup')
    service.serve(server)
    service.wait()
Example #18
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    config.set_middleware_defaults()
    logging.setup(CONF, "cinder")
    LOG = logging.getLogger('cinder.all')
    versionutils.report_deprecated_feature(LOG, _(
        'cinder-all is deprecated in Newton and will be removed in Ocata.'))

    utils.monkey_patch()

    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)

    rpc.init(CONF)

    launcher = service.process_launcher()
    # cinder-api
    try:
        server = service.WSGIService('osapi_volume')
        launcher.launch_service(server, workers=server.workers or 1)
    except (Exception, SystemExit):
        LOG.exception(_LE('Failed to load osapi_volume'))

    for binary in ['cinder-scheduler', 'cinder-backup']:
        try:
            launcher.launch_service(service.Service.create(binary=binary))
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s'), binary)

    # cinder-volume
    try:
        if CONF.enabled_backends:
            for backend in CONF.enabled_backends:
                CONF.register_opt(volume_cmd.host_opt, group=backend)
                backend_host = getattr(CONF, backend).backend_host
                host = "%s@%s" % (backend_host or CONF.host, backend)
                server = service.Service.create(host=host,
                                                service_name=backend,
                                                binary='cinder-volume')
                # Dispose of the whole DB connection pool here before
                # starting another process.  Otherwise we run into cases
                # where child processes share DB connections which results
                # in errors.
                session.dispose_engine()
                launcher.launch_service(server)
        else:
            server = service.Service.create(binary='cinder-volume')
            launcher.launch_service(server)
    except (Exception, SystemExit):
        LOG.exception(_LE('Failed to load cinder-volume'))

    launcher.wait()
Example #19
def initialize_application():
    objects.register_all()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    config.set_middleware_defaults()

    rpc.init(CONF)
    service.setup_profiler(constants.API_BINARY, CONF.host)

    return wsgi.Loader(CONF).load_app(name='osapi_volume')
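
The initialize_application() helpers above return a plain WSGI application; a minimal smoke-test sketch using the stdlib reference server (host and port are assumptions; 8776 is only the conventional Cinder API port):

from wsgiref.simple_server import make_server

# Build the Cinder API app as above and serve it; for local testing only.
app = initialize_application()
make_server('127.0.0.1', 8776, app).serve_forever()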
Example #20
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    server = service.Service.create(binary='cinder-backup')
    service.serve(server)
    service.wait()
Example #21
def main():
    objects.register_all()
    admin_context = context.get_admin_context()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    LOG = logging.getLogger("cinder")
    rpc.init(CONF)

    begin, end = utils.last_completed_audit_period()
    begin, end = _time_error(LOG, begin, end)

    LOG.info("Starting volume usage audit")
    LOG.info("Creating usages for %(begin_period)s until %(end_period)s",
             {"begin_period": begin, "end_period": end})

    extra_info = {
        'audit_period_beginning': str(begin),
        'audit_period_ending': str(end),
    }

    volumes = objects.VolumeList.get_all_active_by_window(admin_context,
                                                          begin,
                                                          end)

    LOG.info("Found %d volumes", len(volumes))
    for volume_ref in volumes:
        _obj_ref_action(_vol_notify_usage, LOG, volume_ref, extra_info,
                        admin_context, begin, end,
                        cinder.volume.utils.notify_about_volume_usage,
                        "volume_id", "volume")

    snapshots = objects.SnapshotList.get_all_active_by_window(admin_context,
                                                              begin, end)
    LOG.info("Found %d snapshots", len(snapshots))
    for snapshot_ref in snapshots:
        _obj_ref_action(_snap_notify_usage, LOG, snapshot_ref, extra_info,
                        admin_context, begin,
                        end, cinder.volume.utils.notify_about_snapshot_usage,
                        "snapshot_id", "snapshot")

    backups = objects.BackupList.get_all_active_by_window(admin_context,
                                                          begin, end)

    LOG.info("Found %d backups", len(backups))
    for backup_ref in backups:
        _obj_ref_action(_backup_notify_usage, LOG, backup_ref, extra_info,
                        admin_context, begin,
                        end, cinder.volume.utils.notify_about_backup_usage,
                        "backup_id", "backup")
    LOG.info("Volume usage audit completed")
Example #22
def main():
    objects.register_all()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    utils.monkey_patch()

    gmr.TextGuruMeditation.setup_autorun(version)

    rpc.init(CONF)
    launcher = service.process_launcher()
    server = service.WSGIService('osapi_volume')
    launcher.launch_service(server, workers=server.workers)
    launcher.wait()
Example #23
    def start(self):
        version_string = version.version_string()
        LOG.audit(_('Starting %(topic)s node (version %(version_string)s)'),
                  {'topic': self.topic, 'version_string': version_string})
        self.manager.init_host()
        self.model_disconnected = False
        ctxt = context.get_admin_context()
        try:
            service_ref = db.service_get_by_args(ctxt,
                                                 self.host,
                                                 self.binary)
            self.service_id = service_ref['id']
        except exception.NotFound:
            self._create_service_ref(ctxt)

        self.conn = rpc.create_connection(new=True)
        LOG.debug(_("Creating Consumer connection for Service %s") %
                  self.topic)

        rpc_dispatcher = self.manager.create_rpc_dispatcher()

        # Share this same connection for these Consumers
        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=False)

        node_topic = '%s.%s' % (self.topic, self.host)
        self.conn.create_consumer(node_topic, rpc_dispatcher, fanout=False)

        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=True)

        # Consume from all consumers in a thread
        self.conn.consume_in_thread()

        if self.report_interval:
            pulse = utils.LoopingCall(self.report_state)
            pulse.start(interval=self.report_interval,
                        initial_delay=self.report_interval)
            self.timers.append(pulse)

        if self.periodic_interval:
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            periodic = utils.LoopingCall(self.periodic_tasks)
            periodic.start(interval=self.periodic_interval,
                           initial_delay=initial_delay)
            self.timers.append(periodic)
Example #24
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project="cinder", version=version.version_string())
    config.set_middleware_defaults()
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    utils.monkey_patch()

    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)

    rpc.init(CONF)
    launcher = service.process_launcher()
    server = service.WSGIService("osapi_volume")
    launcher.launch_service(server, workers=server.workers)
    launcher.wait()
Example #25
    def start(self):
        version_string = version.version_string()
        LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'),
                 {'topic': self.topic, 'version_string': version_string})
        self.model_disconnected = False
        self.manager.init_host()
        ctxt = context.get_admin_context()
        try:
            service_ref = objects.Service.get_by_args(
                ctxt, self.host, self.binary)
            service_ref.rpc_current_version = self.manager.RPC_API_VERSION
            obj_version = objects_base.OBJ_VERSIONS.get_current()
            service_ref.object_current_version = obj_version
            service_ref.save()
            self.service_id = service_ref.id
        except exception.NotFound:
            self._create_service_ref(ctxt)

        LOG.debug("Creating RPC server for service %s", self.topic)

        target = messaging.Target(topic=self.topic, server=self.host)
        endpoints = [self.manager]
        endpoints.extend(self.manager.additional_endpoints)
        serializer = objects_base.CinderObjectSerializer()
        self.rpcserver = rpc.get_server(target, endpoints, serializer)
        self.rpcserver.start()

        self.manager.init_host_with_rpc()

        if self.report_interval:
            pulse = loopingcall.FixedIntervalLoopingCall(
                self.report_state)
            pulse.start(interval=self.report_interval,
                        initial_delay=self.report_interval)
            self.timers.append(pulse)

        if self.periodic_interval:
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            periodic = loopingcall.FixedIntervalLoopingCall(
                self.periodic_tasks)
            periodic.start(interval=self.periodic_interval,
                           initial_delay=initial_delay)
            self.timers.append(periodic)
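
The report_state/periodic_tasks timers above follow the generic oslo.service looping-call pattern; a self-contained sketch, assuming oslo.service is installed:

from oslo_service import loopingcall


def report_state():
    print('service heartbeat')

# Fire report_state every 10 seconds after an initial 10-second delay,
# mirroring how the services above schedule their heartbeats.
pulse = loopingcall.FixedIntervalLoopingCall(report_state)
pulse.start(interval=10, initial_delay=10)
pulse.wait()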
Example #26
    def _get_version(self):
        """Get the OpenStack Cinder version."""
        try:
            from cinder import version
            # Avoid shadowing the imported module with the version string.
            version_string = version.version_string()
            if version_string.startswith('2015.2'):
                return 'kilo'  # For the moment use K driver
            elif version_string.startswith('2015.1'):
                return 'kilo'
            elif version_string.startswith('2014.2'):
                return 'juno'
            else:
                raise ValueError('Unknown cinder version: %s' % version_string)
        except Exception as ex:
            raise ValueError('Cannot determine cinder version: %s' % str(ex))
Example #27
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    launcher = service.get_launcher()
    LOG = logging.getLogger(__name__)
    service_started = False

    if CONF.enabled_backends:
        for backend in CONF.enabled_backends:
            CONF.register_opt(host_opt, group=backend)
            backend_host = getattr(CONF, backend).backend_host
            host = "%s@%s" % (backend_host or CONF.host, backend)
            try:
                server = service.Service.create(host=host,
                                                service_name=backend,
                                                binary='cinder-volume',
                                                coordination=True)
            except Exception:
                msg = _('Volume service %s failed to start.') % host
                LOG.exception(msg)
            else:
                # Dispose of the whole DB connection pool here before
                # starting another process.  Otherwise we run into cases where
                # child processes share DB connections which results in errors.
                session.dispose_engine()
                launcher.launch_service(server)
                service_started = True
    else:
        server = service.Service.create(binary='cinder-volume',
                                        coordination=True)
        launcher.launch_service(server)
        service_started = True

    if not service_started:
        msg = _('No volume service(s) started successfully, terminating.')
        LOG.error(msg)
        sys.exit(1)

    launcher.wait()
Example #28
    def start(self):
        version_string = version.version_string()
        LOG.info(_('Starting %(topic)s node (version %(version_string)s)'),
                 {'topic': self.topic, 'version_string': version_string})
        self.model_disconnected = False
        init_host_res = self.manager.init_host()
        if init_host_res == 'INIT_DRIVER_ERROR':
            LOG.error(_('Init host error, service will exit!'))
            sys.exit(2)
        ctxt = context.get_admin_context()
        try:
            service_ref = db.service_get_by_args(ctxt,
                                                 self.host,
                                                 self.binary)
            self.service_id = service_ref['id']
        except exception.NotFound:
            self._create_service_ref(ctxt)

        LOG.debug("Creating RPC server for service %s" % self.topic)

        target = messaging.Target(topic=self.topic, server=self.host)
        endpoints = [self.manager]
        endpoints.extend(self.manager.additional_endpoints)
        self.rpcserver = rpc.get_server(target, endpoints)
        self.rpcserver.start()

        if self.report_interval:
            pulse = loopingcall.FixedIntervalLoopingCall(
                self.report_state)
            pulse.start(interval=self.report_interval,
                        initial_delay=self.report_interval)
            self.timers.append(pulse)

        if self.periodic_interval:
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            periodic = loopingcall.FixedIntervalLoopingCall(
                self.periodic_tasks)
            periodic.start(interval=self.periodic_interval,
                           initial_delay=initial_delay)
            self.timers.append(periodic)
Example #29
def main():
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup("cinder")
    utils.monkey_patch()
    launcher = service.get_launcher()
    if CONF.enabled_backends:
        for backend in CONF.enabled_backends:
            CONF.register_opt(host_opt, group=backend)
            backend_host = getattr(CONF, backend).backend_host
            host = "%s@%s" % (backend_host or CONF.host, backend)
            server = service.Service.create(host=host,
                                            service_name=backend,
                                            binary='cinder-volume')
            launcher.launch_service(server)
    else:
        server = service.Service.create(binary='cinder-volume')
        launcher.launch_service(server)
    launcher.wait()
Example #30
File: all.py Project: Qeas/cinder
def main():
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup("cinder")
    LOG = logging.getLogger('cinder.all')

    utils.monkey_patch()
    launcher = service.process_launcher()
    # cinder-api
    try:
        server = service.WSGIService('osapi_volume')
        launcher.launch_service(server, workers=server.workers or 1)
    except (Exception, SystemExit):
        LOG.exception(_LE('Failed to load osapi_volume'))

    for binary in ['cinder-volume', 'cinder-scheduler', 'cinder-backup']:
        try:
            launcher.launch_service(service.Service.create(binary=binary))
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s'), binary)
    launcher.wait()
Example #31
    def start(self):
        version_string = version.version_string()
        LOG.info('Starting %(topic)s node (version %(version_string)s)', {
            'topic': self.topic,
            'version_string': version_string
        })
        self.model_disconnected = False

        if self.coordination:
            coordination.COORDINATOR.start()

        # NOTE(yikun): When re-spawning child process, we should set the class
        # attribute back using the origin service_id, otherwise,
        # the Service.service_id will be inherited from the parent process,
        # and will be recorded as the last started service id by mistaken.
        Service.service_id = self.origin_service_id

        self.manager.init_host(added_to_cluster=self.added_to_cluster,
                               service_id=Service.service_id)

        LOG.debug("Creating RPC server for service %s", self.topic)

        ctxt = context.get_admin_context()
        endpoints = [self.manager]
        endpoints.extend(self.manager.additional_endpoints)
        obj_version_cap = objects.Service.get_minimum_obj_version(ctxt)
        LOG.debug("Pinning object versions for RPC server serializer to %s",
                  obj_version_cap)
        serializer = objects_base.CinderObjectSerializer(obj_version_cap)

        target = messaging.Target(topic=self.topic, server=self.host)
        self.rpcserver = rpc.get_server(target, endpoints, serializer)
        self.rpcserver.start()

        # NOTE(dulek): Kids, don't do that at home. We're relying here on
        # oslo.messaging implementation details to keep backward compatibility
        # with pre-Ocata services. This will not matter once we drop
        # compatibility with them.
        if self.topic == constants.VOLUME_TOPIC:
            target = messaging.Target(topic='%(topic)s.%(host)s' % {
                'topic': self.topic,
                'host': self.host
            },
                                      server=vol_utils.extract_host(
                                          self.host, 'host'))
            self.backend_rpcserver = rpc.get_server(target, endpoints,
                                                    serializer)
            self.backend_rpcserver.start()

        if self.cluster:
            LOG.info(
                'Starting %(topic)s cluster %(cluster)s (version '
                '%(version)s)', {
                    'topic': self.topic,
                    'version': version_string,
                    'cluster': self.cluster
                })
            target = messaging.Target(
                topic='%s.%s' % (self.topic, self.cluster),
                server=vol_utils.extract_host(self.cluster, 'host'))
            serializer = objects_base.CinderObjectSerializer(obj_version_cap)
            self.cluster_rpcserver = rpc.get_server(target, endpoints,
                                                    serializer)
            self.cluster_rpcserver.start()

        self.manager.init_host_with_rpc()

        if self.report_interval:
            self.tg.add_timer(self.report_interval,
                              self.report_state,
                              initial_delay=self.report_interval)

        if self.periodic_interval:
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None
            self.tg.add_timer(self.periodic_interval,
                              self.periodic_tasks,
                              initial_delay=initial_delay)
Example #32
    def test_get_manageable_volumes(self):
        testvol = _stub_volume()
        v1 = {
            'reference': {
                'source-name': 'some-ai:storage-1:volume-1'
            },
            'size': 1,
            'safe_to_manage': True,
            'reason_not_safe': '',
            'cinder_id': None,
            'extra_info': {
                'snapshots': '[]'
            }
        }
        v2 = {
            'reference': {
                'source-name': 'some-other-ai:storage-1:volume-1'
            },
            'size': 2,
            'safe_to_manage': True,
            'reason_not_safe': '',
            'cinder_id': None,
            'extra_info': {
                'snapshots': '[]'
            }
        }

        mock1 = mock.MagicMock()
        mock1.__getitem__.side_effect = ['some-ai']
        mock1.name = 'some-ai'
        mocksi1 = mock.MagicMock()
        mocksi1.name = "storage-1"
        mocksi1.__getitem__.side_effect = [[mock.MagicMock()]]
        mock1.storage_instances.list.return_value = [mocksi1]
        mockvol1 = mock.MagicMock()
        mockvol1.name = "volume-1"
        mockvol1.size = v1['size']
        mocksi1.volumes.list.return_value = [mockvol1]

        mock2 = mock.MagicMock()
        mock2.__getitem__.side_effect = ['some-other-ai']
        mock2.name = 'some-other-ai'
        mocksi2 = mock.MagicMock()
        mocksi2.name = "storage-1"
        mocksi2.__getitem__.side_effect = [[mock.MagicMock()]]
        mock2.storage_instances.list.return_value = [mocksi2]
        mockvol2 = mock.MagicMock()
        mockvol2.name = "volume-1"
        mockvol2.size = v2['size']
        mocksi2.volumes.list.return_value = [mockvol2]

        listmock = mock.MagicMock()
        listmock.return_value = [mock1, mock2]
        self.driver.api.app_instances.list = listmock

        marker = mock.MagicMock()
        limit = mock.MagicMock()
        offset = mock.MagicMock()
        sort_keys = mock.MagicMock()
        sort_dirs = mock.MagicMock()
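        # NOTE: newer Cinder releases moved the pagination helper from
        # cinder.volume.utils to cinder.volume.volume_utils, hence the
        # version-dependent patch target below. The plain string compare
        # is lexicographic, not numeric, so it is fragile across major
        # versions with different digit counts.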
        if (version.version_string() >= '15.0.0'):
            with mock.patch(
                    'cinder.volume.volume_utils.paginate_entries_list') \
                    as mpage:
                self.driver.get_manageable_volumes([testvol], marker, limit,
                                                   offset, sort_keys,
                                                   sort_dirs)
                mpage.assert_called_once_with([v1, v2], marker, limit, offset,
                                              sort_keys, sort_dirs)
        else:
            with mock.patch(
                    'cinder.volume.utils.paginate_entries_list') as mpage:
                self.driver.get_manageable_volumes([testvol], marker, limit,
                                                   offset, sort_keys,
                                                   sort_dirs)
                mpage.assert_called_once_with([v1, v2], marker, limit, offset,
                                              sort_keys, sort_dirs)
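
As a hedged aside to the version gate above: comparing version strings lexicographically misorders e.g. '9.0.0' and '15.0.0'; a numeric-aware check using the third-party packaging library (an assumed extra dependency):

from packaging.version import Version

from cinder import version


def cinder_at_least(minimum):
    # Numeric comparison, e.g. Version('15.1.0') >= Version('15.0.0').
    return Version(version.version_string()) >= Version(minimum)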
Example #33
# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'cinder'
copyright = u'2010-present, OpenStack, LLC'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from cinder import version as cinder_version
#import cinder.version
# The full version, including alpha/beta/rc tags.
release = cinder_version.version_string()
# The short X.Y version.
version = cinder_version.canonical_version_string()

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
unused_docs = [
Example #34
def main():
    admin_context = context.get_admin_context()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup("cinder")
    LOG = logging.getLogger("cinder")
    rpc.init(CONF)
    begin, end = utils.last_completed_audit_period()
    if CONF.start_time:
        begin = datetime.strptime(CONF.start_time, "%Y-%m-%d %H:%M:%S")
    if CONF.end_time:
        end = datetime.strptime(CONF.end_time, "%Y-%m-%d %H:%M:%S")
    if not end > begin:
        msg = _("The end time (%(end)s) must be after the start "
                "time (%(start)s).") % {'start': begin,
                                        'end': end}
        print(msg)
        LOG.error(msg)
        sys.exit(-1)
    print(_("Starting volume usage audit"))
    msg = _("Creating usages for %(begin_period)s until %(end_period)s")
    print(msg % {"begin_period": str(begin), "end_period": str(end)})

    extra_info = {
        'audit_period_beginning': str(begin),
        'audit_period_ending': str(end),
    }

    volumes = db.volume_get_active_by_window(admin_context,
                                             begin,
                                             end)
    print(_("Found %d volumes") % len(volumes))
    for volume_ref in volumes:
        try:
            LOG.debug("Send exists notification for <volume_id: "
                      "%(volume_id)s> <project_id %(project_id)s> "
                      "<%(extra_info)s>" %
                      {'volume_id': volume_ref.id,
                       'project_id': volume_ref.project_id,
                       'extra_info': extra_info})
            cinder.volume.utils.notify_about_volume_usage(
                admin_context,
                volume_ref,
                'exists', extra_usage_info=extra_info)
        except Exception as e:
            LOG.error(_LE("Failed to send exists notification"
                          " for volume %s.") %
                      volume_ref.id)
            print(traceback.format_exc())

        if (CONF.send_actions and
                volume_ref.created_at > begin and
                volume_ref.created_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(volume_ref.created_at),
                    'audit_period_ending': str(volume_ref.created_at),
                }
                LOG.debug("Send create notification for "
                          "<volume_id: %(volume_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>" %
                          {'volume_id': volume_ref.id,
                           'project_id': volume_ref.project_id,
                           'extra_info': local_extra_info})
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'create.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'create.end', extra_usage_info=local_extra_info)
            except Exception as e:
                LOG.error(_LE("Failed to send create notification for "
                              "volume %s.") % volume_ref.id)
                print(traceback.format_exc())

        if (CONF.send_actions and volume_ref.deleted_at and
                volume_ref.deleted_at > begin and
                volume_ref.deleted_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(volume_ref.deleted_at),
                    'audit_period_ending': str(volume_ref.deleted_at),
                }
                LOG.debug("Send delete notification for "
                          "<volume_id: %(volume_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>" %
                          {'volume_id': volume_ref.id,
                           'project_id': volume_ref.project_id,
                           'extra_info': local_extra_info})
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'delete.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'delete.end', extra_usage_info=local_extra_info)
            except Exception as e:
                LOG.error(_LE("Failed to send delete notification for volume "
                              "%s.") % volume_ref.id)
                print(traceback.format_exc())

    snapshots = db.snapshot_get_active_by_window(admin_context,
                                                 begin,
                                                 end)
    print(_("Found %d snapshots") % len(snapshots))
    for snapshot_ref in snapshots:
        try:
            LOG.debug("Send notification for <snapshot_id: %(snapshot_id)s> "
                      "<project_id %(project_id)s> <%(extra_info)s>" %
                      {'snapshot_id': snapshot_ref.id,
                       'project_id': snapshot_ref.project_id,
                       'extra_info': extra_info})
            cinder.volume.utils.notify_about_snapshot_usage(admin_context,
                                                            snapshot_ref,
                                                            'exists',
                                                            extra_info)
        except Exception as e:
            LOG.error(_LE("Failed to send exists notification "
                          "for snapshot %s.")
                      % snapshot_ref.id)
            print(traceback.format_exc())

        if (CONF.send_actions and
                snapshot_ref.created_at > begin and
                snapshot_ref.created_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(snapshot_ref.created_at),
                    'audit_period_ending': str(snapshot_ref.created_at),
                }
                LOG.debug("Send create notification for "
                          "<snapshot_id: %(snapshot_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>" %
                          {'snapshot_id': snapshot_ref.id,
                           'project_id': snapshot_ref.project_id,
                           'extra_info': local_extra_info})
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'create.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'create.end', extra_usage_info=local_extra_info)
            except Exception as e:
                LOG.error(_LE("Failed to send create notification for snapshot"
                              "%s.") % snapshot_ref.id)
                print(traceback.format_exc(e))

        if (CONF.send_actions and snapshot_ref.deleted_at and
                snapshot_ref.deleted_at > begin and
                snapshot_ref.deleted_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(snapshot_ref.deleted_at),
                    'audit_period_ending': str(snapshot_ref.deleted_at),
                }
                LOG.debug("Send delete notification for "
                          "<snapshot_id: %(snapshot_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>" %
                          {'snapshot_id': snapshot_ref.id,
                           'project_id': snapshot_ref.project_id,
                           'extra_info': local_extra_info})
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'delete.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'delete.end', extra_usage_info=local_extra_info)
            except Exception as e:
                LOG.error(_LE("Failed to send delete notification for snapshot"
                              "%s.") % snapshot_ref.id)
                print(traceback.format_exc(e))

    print(_("Volume usage audit completed"))
Example #35
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import os

from oslo.config import cfg

from cinder.openstack.common import gettextutils

gettextutils.install('cinder', lazy=False)

from cinder.db.sqlalchemy import migrate_repo
from cinder import version

from migrate.versioning.shell import main

CONF = cfg.CONF

if __name__ == '__main__':
    CONF([], project='cinder', version=version.version_string())
    main(debug='False',
         url=CONF.database.connection,
         repository=os.path.abspath(os.path.dirname(migrate_repo.__file__)))
Example #36
    def list(self):
        print(version.version_string())
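
The same helper works outside a command class; a minimal standalone sketch, assuming the cinder package is importable:

from cinder import version

if __name__ == '__main__':
    print(version.version_string())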
Example #37
def parse_args(argv, default_config_files=None):
    FLAGS(argv[1:],
          project='cinder',
          version=version.version_string(),
          default_config_files=default_config_files)
Example #38
    def service_version(self):
        return version.version_string()
Example #39
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder', version=version.version_string())
    config.set_middleware_defaults()
    logging.setup(CONF, "cinder")
    LOG = logging.getLogger('cinder.all')
    versionutils.report_deprecated_feature(
        LOG,
        _('cinder-all is deprecated in Newton and will be removed in Ocata.'))

    utils.monkey_patch()

    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)

    rpc.init(CONF)

    launcher = service.process_launcher()
    # cinder-api
    try:
        server = service.WSGIService('osapi_volume')
        launcher.launch_service(server, workers=server.workers or 1)
    except (Exception, SystemExit):
        LOG.exception(_LE('Failed to load osapi_volume'))

    for binary in ['cinder-scheduler', 'cinder-backup']:
        try:
            launcher.launch_service(service.Service.create(binary=binary))
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s'), binary)

    # cinder-volume
    try:
        if CONF.enabled_backends:
            for backend in filter(None, CONF.enabled_backends):
                CONF.register_opt(volume_cmd.host_opt, group=backend)
                backend_host = getattr(CONF, backend).backend_host
                host = "%s@%s" % (backend_host or CONF.host, backend)
                server = service.Service.create(host=host,
                                                service_name=backend,
                                                binary='cinder-volume',
                                                coordination=True)
                # Dispose of the whole DB connection pool here before
                # starting another process.  Otherwise we run into cases
                # where child processes share DB connections which results
                # in errors.
                session.dispose_engine()
                launcher.launch_service(server)
        else:
            LOG.warning(
                _LW('Configuration for cinder-volume does not specify '
                    '"enabled_backends", using DEFAULT as backend. '
                    'Support for DEFAULT section to configure drivers '
                    'will be removed in the next release.'))
            server = service.Service.create(binary='cinder-volume',
                                            coordination=True)
            launcher.launch_service(server)
    except (Exception, SystemExit):
        LOG.exception(_LE('Failed to load cinder-volume'))

    launcher.wait()
Example #40
    def start(self):
        version_string = version.version_string()
        LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'), {
            'topic': self.topic,
            'version_string': version_string
        })
        self.model_disconnected = False

        if self.coordination:
            coordination.COORDINATOR.start()

        self.manager.init_host(added_to_cluster=self.added_to_cluster,
                               service_id=Service.service_id)

        LOG.debug("Creating RPC server for service %s", self.topic)

        ctxt = context.get_admin_context()
        target = messaging.Target(topic=self.topic, server=self.host)
        endpoints = [self.manager]
        endpoints.extend(self.manager.additional_endpoints)
        obj_version_cap = objects.Service.get_minimum_obj_version(ctxt)
        LOG.debug("Pinning object versions for RPC server serializer to %s",
                  obj_version_cap)
        serializer = objects_base.CinderObjectSerializer(obj_version_cap)
        self.rpcserver = rpc.get_server(target, endpoints, serializer)
        self.rpcserver.start()

        # TODO(geguileo): In O - Remove the is_svc_upgrading_to_n part
        if self.cluster and not self.is_svc_upgrading_to_n(self.binary):
            LOG.info(
                _LI('Starting %(topic)s cluster %(cluster)s (version '
                    '%(version)s)'), {
                        'topic': self.topic,
                        'version': version_string,
                        'cluster': self.cluster
                    })
            target = messaging.Target(topic=self.topic, server=self.cluster)
            serializer = objects_base.CinderObjectSerializer(obj_version_cap)
            self.cluster_rpcserver = rpc.get_server(target, endpoints,
                                                    serializer)
            self.cluster_rpcserver.start()

        self.manager.init_host_with_rpc()

        if self.report_interval:
            pulse = loopingcall.FixedIntervalLoopingCall(self.report_state)
            pulse.start(interval=self.report_interval,
                        initial_delay=self.report_interval)
            self.timers.append(pulse)

        if self.periodic_interval:
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            periodic = loopingcall.FixedIntervalLoopingCall(
                self.periodic_tasks)
            periodic.start(interval=self.periodic_interval,
                           initial_delay=initial_delay)
            self.timers.append(periodic)
Example #41
from cinder import context
from cinder import keymgr
from cinder import utils

from cinder.config import reset_conf

from eventlet.green import subprocess
from eventlet import spawn
from eventlet import sleep


# Debug invocation, kept for reference:
# python2 proc_manager.py --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
CONF = cfg.CONF
CONF(sys.argv[1:], project='cinder',
     version=version.version_string())


ctxt = context.get_admin_context()
LOG = logging.getLogger(__name__)


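# Inferred purpose (not stated in the source): manage one cinder-volume
# child process per configured backend, tracked in scheduler_name_proc_dict
# and driven by per-backend config files under /etc/cinder.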
class VolumeProcessManager(six.with_metaclass(service.Singleton, base.Base)):
    # class VolumeProcessManager(object):
    def __init__(self, db_driver=None):
        super(VolumeProcessManager, self).__init__(db_driver)

        self.scheduler_name_proc_dict = {}
        self.conf_dir = "/etc/cinder"

    def get_all_backend(self):
Example #42
        return list_response

    def GetCapacity(self, req, context):
        # CSI GetCapacity RPC; not implemented in this example.
        pass

    def ControllerGetCapabilities(self, req, context):
        # CSI ControllerGetCapabilities RPC; not implemented in this example.
        pass


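# serve() below exposes CinderServicer on port 50051. A hedged client-side
# sketch (not part of the source), assuming the standard CSI-generated stubs:
#   channel = grpc.insecure_channel('localhost:50051')
#   stub = csi_pb2_grpc.ControllerStub(channel)
#   caps = stub.ControllerGetCapabilities(
#       csi_pb2.ControllerGetCapabilitiesRequest())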
def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    csi_pb2_grpc.add_ControllerServicer_to_server(CinderServicer(), server)
    server.add_insecure_port('[::]:50051')
    server.start()
    print("now serving...")
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)


if __name__ == '__main__':
    objects.register_all()
    CONF(sys.argv[1:], project='cinder', version=version.version_string())
    config.set_middleware_defaults()
    #utils.monkey_patch()
    rpc.init(CONF)
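    # rpc.init() sets up the oslo.messaging transport so the servicer can
    # reach Cinder's internal RPC APIs once serving starts.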

    serve()
Example #43
def main():
    objects.register_all()
    admin_context = context.get_admin_context()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    LOG = logging.getLogger("cinder")
    rpc.init(CONF)
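    # Hedged usage sketch (not in the source): the audit is normally run with
    # an explicit window, e.g.
    #   cinder-volume-usage-audit --start_time '2017-01-01 00:00:00' \
    #       --end_time '2017-02-01 00:00:00' --send_actions
    # Without one, the last completed audit period is used: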
    begin, end = utils.last_completed_audit_period()
    if CONF.start_time:
        begin = datetime.datetime.strptime(CONF.start_time,
                                           "%Y-%m-%d %H:%M:%S")
    if CONF.end_time:
        end = datetime.datetime.strptime(CONF.end_time,
                                         "%Y-%m-%d %H:%M:%S")
    # strptime returns naive datetimes; pin both ends to UTC so the window
    # comparisons below are timezone-consistent.
    begin = begin.replace(tzinfo=iso8601.UTC)
    end = end.replace(tzinfo=iso8601.UTC)
    if not end > begin:
        msg = _("The end time (%(end)s) must be after the start "
                "time (%(start)s).") % {'start': begin,
                                        'end': end}
        LOG.error(msg)
        sys.exit(-1)
    LOG.info(_LI("Starting volume usage audit"))
    msg = _LI("Creating usages for %(begin_period)s until %(end_period)s")
    LOG.info(msg, {"begin_period": str(begin), "end_period": str(end)})

    extra_info = {
        'audit_period_beginning': str(begin),
        'audit_period_ending': str(end),
    }
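    # The 'exists' notifications carry the full audit window; the
    # create.*/delete.* notifications below narrow it to each resource's own
    # created_at/deleted_at timestamp.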

    volumes = objects.VolumeList.get_all_active_by_window(admin_context,
                                                          begin,
                                                          end)
    LOG.info(_LI("Found %d volumes"), len(volumes))
    for volume_ref in volumes:
        try:
            cinder.volume.utils.notify_about_volume_usage(
                admin_context,
                volume_ref,
                'exists', extra_usage_info=extra_info)
            LOG.debug("Sent exists notification for <volume_id: "
                      "%(volume_id)s> <project_id %(project_id)s> "
                      "<%(extra_info)s>",
                      {'volume_id': volume_ref.id,
                       'project_id': volume_ref.project_id,
                       'extra_info': extra_info})
        except Exception as exc_msg:
            LOG.exception(_LE("Exists volume notification failed: %s"),
                          exc_msg, resource=volume_ref)

        if (CONF.send_actions and
                volume_ref.created_at > begin and
                volume_ref.created_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(volume_ref.created_at),
                    'audit_period_ending': str(volume_ref.created_at),
                }
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'create.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'create.end', extra_usage_info=local_extra_info)
                LOG.debug("Sent create notification for "
                          "<volume_id: %(volume_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>",
                          {'volume_id': volume_ref.id,
                           'project_id': volume_ref.project_id,
                           'extra_info': local_extra_info})
            except Exception as exc_msg:
                LOG.exception(_LE("Create volume notification failed: %s"),
                              exc_msg, resource=volume_ref)

        if (CONF.send_actions and volume_ref.deleted_at and
                volume_ref.deleted_at > begin and
                volume_ref.deleted_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(volume_ref.deleted_at),
                    'audit_period_ending': str(volume_ref.deleted_at),
                }
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'delete.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'delete.end', extra_usage_info=local_extra_info)
                LOG.debug("Sent delete notification for "
                          "<volume_id: %(volume_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>",
                          {'volume_id': volume_ref.id,
                           'project_id': volume_ref.project_id,
                           'extra_info': local_extra_info})
            except Exception as exc_msg:
                LOG.exception(_LE("Delete volume notification failed: %s"),
                              exc_msg, resource=volume_ref)

    snapshots = objects.SnapshotList.get_all_active_by_window(admin_context,
                                                              begin, end)
    LOG.info(_LI("Found %d snapshots"), len(snapshots))
    for snapshot_ref in snapshots:
        try:
            cinder.volume.utils.notify_about_snapshot_usage(admin_context,
                                                            snapshot_ref,
                                                            'exists',
                                                            extra_info)
            LOG.debug("Sent notification for <snapshot_id: %(snapshot_id)s> "
                      "<project_id %(project_id)s> <%(extra_info)s>",
                      {'snapshot_id': snapshot_ref.id,
                       'project_id': snapshot_ref.project_id,
                       'extra_info': extra_info})
        except Exception as exc_msg:
            LOG.exception(_LE("Exists snapshot notification failed: %s"),
                          exc_msg, resource=snapshot_ref)

        if (CONF.send_actions and
                snapshot_ref.created_at > begin and
                snapshot_ref.created_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(snapshot_ref.created_at),
                    'audit_period_ending': str(snapshot_ref.created_at),
                }
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'create.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'create.end', extra_usage_info=local_extra_info)
                LOG.debug("Sent create notification for "
                          "<snapshot_id: %(snapshot_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>",
                          {'snapshot_id': snapshot_ref.id,
                           'project_id': snapshot_ref.project_id,
                           'extra_info': local_extra_info})
            except Exception as exc_msg:
                LOG.exception(_LE("Create snapshot notification failed: %s"),
                              exc_msg, resource=snapshot_ref)

        if (CONF.send_actions and snapshot_ref.deleted_at and
                snapshot_ref.deleted_at > begin and
                snapshot_ref.deleted_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(snapshot_ref.deleted_at),
                    'audit_period_ending': str(snapshot_ref.deleted_at),
                }
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'delete.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'delete.end', extra_usage_info=local_extra_info)
                LOG.debug("Sent delete notification for "
                          "<snapshot_id: %(snapshot_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>",
                          {'snapshot_id': snapshot_ref.id,
                           'project_id': snapshot_ref.project_id,
                           'extra_info': local_extra_info})
            except Exception as exc_msg:
                LOG.exception(_LE("Delete snapshot notification failed: %s"),
                              exc_msg, resource=snapshot_ref)

    backups = objects.BackupList.get_all_active_by_window(admin_context,
                                                          begin, end)

    LOG.info(_LI("Found %d backups"), len(backups))
    for backup_ref in backups:
        try:
            cinder.volume.utils.notify_about_backup_usage(admin_context,
                                                          backup_ref,
                                                          'exists',
                                                          extra_info)
            LOG.debug("Sent notification for <backup_id: %(backup_id)s> "
                      "<project_id %(project_id)s> <%(extra_info)s>",
                      {'backup_id': backup_ref.id,
                       'project_id': backup_ref.project_id,
                       'extra_info': extra_info})
        except Exception as exc_msg:
            LOG.error(_LE("Exists backup notification failed: %s"),
                      exc_msg)

        if (CONF.send_actions and
                backup_ref.created_at > begin and
                backup_ref.created_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(backup_ref.created_at),
                    'audit_period_ending': str(backup_ref.created_at),
                }
                cinder.volume.utils.notify_about_backup_usage(
                    admin_context,
                    backup_ref,
                    'create.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_backup_usage(
                    admin_context,
                    backup_ref,
                    'create.end', extra_usage_info=local_extra_info)
                LOG.debug("Sent create notification for "
                          "<backup_id: %(backup_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>",
                          {'backup_id': backup_ref.id,
                           'project_id': backup_ref.project_id,
                           'extra_info': local_extra_info})
            except Exception as exc_msg:
                LOG.error(_LE("Create backup notification failed: %s"),
                          exc_msg)

        if (CONF.send_actions and backup_ref.deleted_at and
                backup_ref.deleted_at > begin and
                backup_ref.deleted_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(backup_ref.deleted_at),
                    'audit_period_ending': str(backup_ref.deleted_at),
                }
                cinder.volume.utils.notify_about_backup_usage(
                    admin_context,
                    backup_ref,
                    'delete.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_backup_usage(
                    admin_context,
                    backup_ref,
                    'delete.end', extra_usage_info=local_extra_info)
                LOG.debug("Sent delete notification for "
                          "<backup_id: %(backup_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>",
                          {'backup_id': backup_ref.id,
                           'project_id': backup_ref.project_id,
                           'extra_info': local_extra_info})
            except Exception as exc_msg:
                LOG.error(_LE("Delete backup notification failed: %s"),
                          exc_msg)

    LOG.info(_LI("Volume usage audit completed"))
Example #44
    def start(self):
        version_string = version.version_string()
        LOG.info('Starting %(topic)s node (version %(version_string)s)', {
            'topic': self.topic,
            'version_string': version_string
        })
        self.model_disconnected = False

        if self.coordination:
            coordination.COORDINATOR.start()

        self.manager.init_host(added_to_cluster=self.added_to_cluster,
                               service_id=Service.service_id)

        LOG.debug("Creating RPC server for service %s", self.topic)

        ctxt = context.get_admin_context()
        endpoints = [self.manager]
        endpoints.extend(self.manager.additional_endpoints)
        obj_version_cap = objects.Service.get_minimum_obj_version(ctxt)
        LOG.debug("Pinning object versions for RPC server serializer to %s",
                  obj_version_cap)
        serializer = objects_base.CinderObjectSerializer(obj_version_cap)

        target = messaging.Target(topic=self.topic, server=self.host)
        self.rpcserver = rpc.get_server(target, endpoints, serializer)
        self.rpcserver.start()

        # NOTE(dulek): Kids, don't do that at home. We're relying here on
        # oslo.messaging implementation details to keep backward compatibility
        # with pre-Ocata services. This will not matter once we drop
        # compatibility with them.
        if self.topic == constants.VOLUME_TOPIC:
            target = messaging.Target(
                topic='%(topic)s.%(host)s' % {'topic': self.topic,
                                              'host': self.host},
                server=vol_utils.extract_host(self.host, 'host'))
            self.backend_rpcserver = rpc.get_server(target, endpoints,
                                                    serializer)
            self.backend_rpcserver.start()

        if self.cluster:
            LOG.info(
                'Starting %(topic)s cluster %(cluster)s (version '
                '%(version)s)', {
                    'topic': self.topic,
                    'version': version_string,
                    'cluster': self.cluster
                })
            target = messaging.Target(
                topic='%s.%s' % (self.topic, self.cluster),
                server=vol_utils.extract_host(self.cluster, 'host'))
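            # Cluster requests land on a shared '<topic>.<cluster>' queue
            # that any service in the cluster may consume.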
            serializer = objects_base.CinderObjectSerializer(obj_version_cap)
            self.cluster_rpcserver = rpc.get_server(target, endpoints,
                                                    serializer)
            self.cluster_rpcserver.start()

        self.manager.init_host_with_rpc()

        if self.report_interval:
            pulse = loopingcall.FixedIntervalLoopingCall(self.report_state)
            pulse.start(interval=self.report_interval,
                        initial_delay=self.report_interval)
            self.timers.append(pulse)

        if self.periodic_interval:
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            periodic = loopingcall.FixedIntervalLoopingCall(
                self.periodic_tasks)
            periodic.start(interval=self.periodic_interval,
                           initial_delay=initial_delay)
            self.timers.append(periodic)

    def service_version(self, context):
        return version.version_string()