Example #1
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.set_defaults(
        default_log_levels=logging.get_default_log_levels() +
        _EXTRA_DEFAULT_LOG_LEVELS)
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    global LOG
    LOG = logging.getLogger(__name__)

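    # With backup_workers > 1, one cinder-backup process is launched per
    # worker; otherwise a single service instance is served in this process.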
    if CONF.backup_workers > 1:
        LOG.info('Backup running with %s processes.', CONF.backup_workers)
        launcher = service.get_launcher()

        for i in range(CONF.backup_workers):
            _launch_backup_process(launcher, i)

        launcher.wait()
    else:
        LOG.info('Backup running in single process mode.')
        server = service.Service.create(binary='cinder-backup',
                                        coordination=True,
                                        process_number=1)
        service.serve(server)
        service.wait()
Example #2
def main():
    objects.register_all()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version)
    launcher = service.get_launcher()
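    # Launch one volume service per configured backend, addressed as
    # "<backend_host or CONF.host>@<backend>".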
    if CONF.enabled_backends:
        for backend in CONF.enabled_backends:
            CONF.register_opt(host_opt, group=backend)
            backend_host = getattr(CONF, backend).backend_host
            host = "%s@%s" % (backend_host or CONF.host, backend)
            server = service.Service.create(host=host,
                                            service_name=backend,
                                            binary='cinder-volume')
            # Dispose of the whole DB connection pool here before
            # starting another process.  Otherwise we run into cases where
            # child processes share DB connections which results in errors.
            session.dispose_engine()
            launcher.launch_service(server)
    else:
        server = service.Service.create(binary='cinder-volume')
        launcher.launch_service(server)
    launcher.wait()
Example #3
def main():
    """Parse options and call the appropriate class/method."""
    objects.register_all()
    CONF.register_cli_opt(category_opt)
    script_name = sys.argv[0]
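    # With no category/action arguments, print usage information and exit
    # with status 2.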
    if len(sys.argv) < 2:
        print(_("\nOpenStack Cinder version: %(version)s\n") %
              {'version': version.version_string()})
        print(script_name + " category action [<args>]")
        print(_("Available categories:"))
        for category in CATEGORIES:
            print(_("\t%s") % category)
        sys.exit(2)

    try:
        CONF(sys.argv[1:], project='cinder',
             version=version.version_string())
        logging.setup(CONF, "cinder")
        python_logging.captureWarnings(True)
    except cfg.ConfigDirNotFoundError as details:
        print(_("Invalid directory: %s") % details)
        sys.exit(2)
    except cfg.ConfigFilesNotFoundError as e:
        cfg_files = e.config_files
        print(_("Failed to read configuration file(s): %s") % cfg_files)
        sys.exit(2)

    fn = CONF.category.action_fn
    fn_kwargs = fetch_func_args(fn)
    fn(**fn_kwargs)
Example #4
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project="cinder", version=version.version_string())
    config.set_middleware_defaults()
    logging.setup(CONF, "cinder")
    LOG = logging.getLogger("cinder.all")
    versionutils.report_deprecated_feature(LOG, _("cinder-all is deprecated in Newton and will be removed in Ocata."))

    utils.monkey_patch()

    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)

    rpc.init(CONF)

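    # cinder-all starts every Cinder service (API, scheduler, backup and
    # volume) under a single process launcher.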
    launcher = service.process_launcher()
    # cinder-api
    try:
        server = service.WSGIService("osapi_volume")
        launcher.launch_service(server, workers=server.workers or 1)
    except (Exception, SystemExit):
        LOG.exception(_LE("Failed to load osapi_volume"))

    for binary in ["cinder-scheduler", "cinder-backup"]:
        try:
            launcher.launch_service(service.Service.create(binary=binary))
        except (Exception, SystemExit):
            LOG.exception(_LE("Failed to load %s"), binary)

    # cinder-volume
    try:
        if CONF.enabled_backends:
            for backend in CONF.enabled_backends:
                CONF.register_opt(volume_cmd.host_opt, group=backend)
                backend_host = getattr(CONF, backend).backend_host
                host = "%s@%s" % (backend_host or CONF.host, backend)
                server = service.Service.create(
                    host=host, service_name=backend, binary="cinder-volume", coordination=True
                )
                # Dispose of the whole DB connection pool here before
                # starting another process.  Otherwise we run into cases
                # where child processes share DB connections which results
                # in errors.
                session.dispose_engine()
                launcher.launch_service(server)
        else:
            LOG.warning(
                _LW(
                    "Configuration for cinder-volume does not specify "
                    '"enabled_backends", using DEFAULT as backend. '
                    "Support for DEFAULT section to configure drivers "
                    "will be removed in the next release."
                )
            )
            server = service.Service.create(binary="cinder-volume", coordination=True)
            launcher.launch_service(server)
    except (Exception, SystemExit):
        LOG.exception(_LE("Failed to load cinder-volume"))

    launcher.wait()
Example #5
def main():
    """Parse options and call the appropriate class/method."""
    objects.register_all()
    CONF.register_cli_opt(category_opt)
    script_name = sys.argv[0]
    if len(sys.argv) < 2:
        print(_("\nOpenStack Cinder version: %(version)s\n") %
              {'version': version.version_string()})
        print(script_name + " category action [<args>]")
        print(_("Available categories:"))
        for category in CATEGORIES:
            print(_("\t%s") % category)
        sys.exit(2)

    try:
        CONF(sys.argv[1:], project='cinder',
             version=version.version_string())
        logging.setup(CONF, "cinder")
    except cfg.ConfigFilesNotFoundError:
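        # If the configuration file exists but is not readable, re-execute
        # cinder-manage via sudo as the file's owner.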
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
            except Exception:
                print(_('sudo failed, continuing as if nothing happened'))

        print(_('Please re-run cinder-manage as root.'))
        sys.exit(2)

    fn = CONF.category.action_fn
    fn_args = fetch_func_args(fn)
    fn(*fn_args)
Example #6
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    global LOG
    LOG = logging.getLogger(__name__)

    if not CONF.enabled_backends:
        LOG.error('Configuration for cinder-volume does not specify '
                  '"enabled_backends". Using DEFAULT section to configure '
                  'drivers is not supported since Ocata.')
        sys.exit(1)

    if os.name == 'nt':
        # We cannot use oslo.service to spawn multiple services on Windows.
        # It relies on forking, which is not available on Windows.
        # Furthermore, service objects are unmarshallable objects that are
        # passed to subprocesses.
        _launch_services_win32()
    else:
        _launch_services_posix()
Example #7
def initialize_application():
    objects.register_all()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")

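    # Initialize RPC, then build and return the osapi_volume WSGI application.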
    rpc.init(CONF)
    return wsgi.Loader(CONF).load_app(name='osapi_volume')
Example #8
def initialize_application():
    objects.register_all()
    CONF(sys.argv[1:], project="cinder", version=version.version_string())
    logging.setup(CONF, "cinder")
    config.set_middleware_defaults()

    rpc.init(CONF)
    return wsgi.Loader(CONF).load_app(name="osapi_volume")
Example #9
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    launcher = service.get_launcher()
    LOG = logging.getLogger(__name__)
    service_started = False

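    # Track whether at least one volume service started; if none did, the
    # process exits with an error below.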
    if CONF.enabled_backends:
        for backend in filter(None, CONF.enabled_backends):
            CONF.register_opt(host_opt, group=backend)
            backend_host = getattr(CONF, backend).backend_host
            host = "%s@%s" % (backend_host or CONF.host, backend)
            # We also want to set cluster to None on empty strings, and we
            # ignore leading and trailing spaces.
            cluster = CONF.cluster and CONF.cluster.strip()
            cluster = (cluster or None) and '%s@%s' % (cluster, backend)
            try:
                server = service.Service.create(host=host,
                                                service_name=backend,
                                                binary='cinder-volume',
                                                coordination=True,
                                                cluster=cluster)
            except Exception:
                msg = _('Volume service %s failed to start.') % host
                LOG.exception(msg)
            else:
                # Dispose of the whole DB connection pool here before
                # starting another process.  Otherwise we run into cases where
                # child processes share DB connections which results in errors.
                session.dispose_engine()
                launcher.launch_service(server)
                service_started = True
    else:
        LOG.warning(_LW('Configuration for cinder-volume does not specify '
                        '"enabled_backends", using DEFAULT as backend. '
                        'Support for DEFAULT section to configure drivers '
                        'will be removed in the next release.'))
        server = service.Service.create(binary='cinder-volume',
                                        coordination=True,
                                        cluster=CONF.cluster)
        launcher.launch_service(server)
        service_started = True

    if not service_started:
        msg = _('No volume service(s) started successfully, terminating.')
        LOG.error(msg)
        sys.exit(1)

    launcher.wait()
Example #10
def main():
    objects.register_all()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version)
    server = service.Service.create(binary='cinder-scheduler')
    service.serve(server)
    service.wait()
Example #11
def initialize_application():
    objects.register_all()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    config.set_middleware_defaults()

    rpc.init(CONF)
    service.setup_profiler(constants.API_BINARY, CONF.host)

    return wsgi.Loader(CONF).load_app(name='osapi_volume')
Example #12
def main():
    objects.register_all()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version)
    server = service.Service.create(binary='cinder-backup')
    service.serve(server)
    service.wait()
Example #13
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    config.set_middleware_defaults()
    logging.setup(CONF, "cinder")
    LOG = logging.getLogger('cinder.all')
    versionutils.report_deprecated_feature(LOG, _(
        'cinder-all is deprecated in Newton and will be removed in Ocata.'))

    utils.monkey_patch()

    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)

    rpc.init(CONF)

    launcher = service.process_launcher()
    # cinder-api
    try:
        server = service.WSGIService('osapi_volume')
        launcher.launch_service(server, workers=server.workers or 1)
    except (Exception, SystemExit):
        LOG.exception(_LE('Failed to load osapi_volume'))

    for binary in ['cinder-scheduler', 'cinder-backup']:
        try:
            launcher.launch_service(service.Service.create(binary=binary))
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s'), binary)

    # cinder-volume
    try:
        if CONF.enabled_backends:
            for backend in CONF.enabled_backends:
                CONF.register_opt(volume_cmd.host_opt, group=backend)
                backend_host = getattr(CONF, backend).backend_host
                host = "%s@%s" % (backend_host or CONF.host, backend)
                server = service.Service.create(host=host,
                                                service_name=backend,
                                                binary='cinder-volume')
                # Dispose of the whole DB connection pool here before
                # starting another process.  Otherwise we run into cases
                # where child processes share DB connections which results
                # in errors.
                session.dispose_engine()
                launcher.launch_service(server)
        else:
            server = service.Service.create(binary='cinder-volume')
            launcher.launch_service(server)
    except (Exception, SystemExit):
        LOG.exception(_LE('Failed to load cinder-volume'))

    launcher.wait()
Example #14
def main():
    objects.register_all()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    utils.monkey_patch()

    rpc.init(CONF)
    launcher = service.process_launcher()
    server = service.WSGIService('osapi_volume')
    launcher.launch_service(server, workers=server.workers)
    launcher.wait()
Example #15
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    server = service.Service.create(binary='cinder-backup')
    service.serve(server)
    service.wait()
Example #16
def main():
    objects.register_all()
    admin_context = context.get_admin_context()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    LOG = logging.getLogger("cinder")
    rpc.init(CONF)

    begin, end = utils.last_completed_audit_period()
    begin, end = _time_error(LOG, begin, end)

    LOG.info("Starting volume usage audit")
    LOG.info("Creating usages for %(begin_period)s until %(end_period)s",
             {"begin_period": begin, "end_period": end})

    extra_info = {
        'audit_period_beginning': str(begin),
        'audit_period_ending': str(end),
    }

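    # Emit usage notifications for every volume, snapshot and backup that was
    # active within the audit window.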
    volumes = objects.VolumeList.get_all_active_by_window(admin_context,
                                                          begin,
                                                          end)

    LOG.info("Found %d volumes", len(volumes))
    for volume_ref in volumes:
        _obj_ref_action(_vol_notify_usage, LOG, volume_ref, extra_info,
                        admin_context, begin, end,
                        cinder.volume.utils.notify_about_volume_usage,
                        "volume_id", "volume")

    snapshots = objects.SnapshotList.get_all_active_by_window(admin_context,
                                                              begin, end)
    LOG.info("Found %d snapshots", len(snapshots))
    for snapshot_ref in snapshots:
        _obj_ref_action(_snap_notify_usage, LOG, snapshot_ref, extra_info,
                        admin_context, begin,
                        end, cinder.volume.utils.notify_about_snapshot_usage,
                        "snapshot_id", "snapshot")

    backups = objects.BackupList.get_all_active_by_window(admin_context,
                                                          begin, end)

    LOG.info("Found %d backups", len(backups))
    for backup_ref in backups:
        _obj_ref_action(_backup_notify_usage, LOG, backup_ref, extra_info,
                        admin_context, begin,
                        end, cinder.volume.utils.notify_about_backup_usage,
                        "backup_id", "backup")
    LOG.info("Volume usage audit completed")
Example #17
def main():
    objects.register_all()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    utils.monkey_patch()

    gmr.TextGuruMeditation.setup_autorun(version)

    rpc.init(CONF)
    launcher = service.process_launcher()
    server = service.WSGIService('osapi_volume')
    launcher.launch_service(server, workers=server.workers)
    launcher.wait()
Example #18
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project="cinder", version=version.version_string())
    config.set_middleware_defaults()
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    utils.monkey_patch()

    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)

    rpc.init(CONF)
    launcher = service.process_launcher()
    server = service.WSGIService("osapi_volume")
    launcher.launch_service(server, workers=server.workers)
    launcher.wait()
Example #19
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    launcher = service.get_launcher()
    LOG = logging.getLogger(__name__)
    service_started = False

    if CONF.enabled_backends:
        for backend in CONF.enabled_backends:
            CONF.register_opt(host_opt, group=backend)
            backend_host = getattr(CONF, backend).backend_host
            host = "%s@%s" % (backend_host or CONF.host, backend)
            try:
                server = service.Service.create(host=host,
                                                service_name=backend,
                                                binary='cinder-volume',
                                                coordination=True)
            except Exception:
                msg = _('Volume service %s failed to start.') % host
                LOG.exception(msg)
            else:
                # Dispose of the whole DB connection pool here before
                # starting another process.  Otherwise we run into cases where
                # child processes share DB connections which results in errors.
                session.dispose_engine()
                launcher.launch_service(server)
                service_started = True
    else:
        server = service.Service.create(binary='cinder-volume',
                                        coordination=True)
        launcher.launch_service(server)
        service_started = True

    if not service_started:
        msg = _('No volume service(s) started successfully, terminating.')
        LOG.error(msg)
        sys.exit(1)

    launcher.wait()
Example #20
def main():
    objects.register_all()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    utils.monkey_patch()
    launcher = service.get_launcher()
    if CONF.enabled_backends:
        for backend in CONF.enabled_backends:
            CONF.register_opt(host_opt, group=backend)
            backend_host = getattr(CONF, backend).backend_host
            host = "%s@%s" % (backend_host or CONF.host, backend)
            server = service.Service.create(host=host,
                                            service_name=backend,
                                            binary='cinder-volume')
            launcher.launch_service(server)
    else:
        server = service.Service.create(binary='cinder-volume')
        launcher.launch_service(server)
    launcher.wait()
Example #21
def main():
    objects.register_all()
    admin_context = context.get_admin_context()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    LOG = logging.getLogger("cinder")
    rpc.init(CONF)
    begin, end = utils.last_completed_audit_period()
    if CONF.start_time:
        begin = datetime.datetime.strptime(CONF.start_time,
                                           "%Y-%m-%d %H:%M:%S")
    if CONF.end_time:
        end = datetime.datetime.strptime(CONF.end_time,
                                         "%Y-%m-%d %H:%M:%S")
    if not end > begin:
        msg = _("The end time (%(end)s) must be after the start "
                "time (%(start)s).") % {'start': begin,
                                        'end': end}
        LOG.error(msg)
        sys.exit(-1)
    LOG.debug("Starting volume usage audit")
    msg = _("Creating usages for %(begin_period)s until %(end_period)s")
    LOG.debug(msg, {"begin_period": str(begin), "end_period": str(end)})

    extra_info = {
        'audit_period_beginning': str(begin),
        'audit_period_ending': str(end),
    }

    volumes = db.volume_get_active_by_window(admin_context,
                                             begin,
                                             end)
    LOG.debug("Found %d volumes"), len(volumes)
    for volume_ref in volumes:
        try:
            LOG.debug("Send exists notification for <volume_id: "
                      "%(volume_id)s> <project_id %(project_id)s> "
                      "<%(extra_info)s>",
                      {'volume_id': volume_ref.id,
                       'project_id': volume_ref.project_id,
                       'extra_info': extra_info})
            cinder.volume.utils.notify_about_volume_usage(
                admin_context,
                volume_ref,
                'exists', extra_usage_info=extra_info)
        except Exception as exc_msg:
            LOG.exception(_LE("Exists volume notification failed: %s"),
                          exc_msg, resource=volume_ref)

        if (CONF.send_actions and
                volume_ref.created_at > begin and
                volume_ref.created_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(volume_ref.created_at),
                    'audit_period_ending': str(volume_ref.created_at),
                }
                LOG.debug("Send create notification for "
                          "<volume_id: %(volume_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>",
                          {'volume_id': volume_ref.id,
                           'project_id': volume_ref.project_id,
                           'extra_info': local_extra_info})
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'create.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'create.end', extra_usage_info=local_extra_info)
            except Exception as exc_msg:
                LOG.exception(_LE("Create volume notification failed: %s"),
                              exc_msg, resource=volume_ref)

        if (CONF.send_actions and volume_ref.deleted_at and
                volume_ref.deleted_at > begin and
                volume_ref.deleted_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(volume_ref.deleted_at),
                    'audit_period_ending': str(volume_ref.deleted_at),
                }
                LOG.debug("Send delete notification for "
                          "<volume_id: %(volume_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>",
                          {'volume_id': volume_ref.id,
                           'project_id': volume_ref.project_id,
                           'extra_info': local_extra_info})
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'delete.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'delete.end', extra_usage_info=local_extra_info)
            except Exception as exc_msg:
                LOG.exception(_LE("Delete volume notification failed: %s"),
                              exc_msg, resource=volume_ref)

    snapshots = objects.SnapshotList.get_active_by_window(admin_context,
                                                          begin, end)
    LOG.debug("Found %d snapshots"), len(snapshots)
    for snapshot_ref in snapshots:
        try:
            LOG.debug("Send notification for <snapshot_id: %(snapshot_id)s> "
                      "<project_id %(project_id)s> <%(extra_info)s>",
                      {'snapshot_id': snapshot_ref.id,
                       'project_id': snapshot_ref.project_id,
                       'extra_info': extra_info})
            cinder.volume.utils.notify_about_snapshot_usage(admin_context,
                                                            snapshot_ref,
                                                            'exists',
                                                            extra_info)
        except Exception as exc_msg:
            LOG.exception(_LE("Exists snapshot notification failed: %s"),
                          exc_msg, resource=snapshot_ref)

        if (CONF.send_actions and
                snapshot_ref.created_at > begin and
                snapshot_ref.created_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(snapshot_ref.created_at),
                    'audit_period_ending': str(snapshot_ref.created_at),
                }
                LOG.debug("Send create notification for "
                          "<snapshot_id: %(snapshot_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>",
                          {'snapshot_id': snapshot_ref.id,
                           'project_id': snapshot_ref.project_id,
                           'extra_info': local_extra_info})
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'create.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'create.end', extra_usage_info=local_extra_info)
            except Exception as exc_msg:
                LOG.exception(_LE("Create snapshot notification failed: %s"),
                              exc_msg, resource=snapshot_ref)

        if (CONF.send_actions and snapshot_ref.deleted_at and
                snapshot_ref.deleted_at > begin and
                snapshot_ref.deleted_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(snapshot_ref.deleted_at),
                    'audit_period_ending': str(snapshot_ref.deleted_at),
                }
                LOG.debug("Send delete notification for "
                          "<snapshot_id: %(snapshot_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>",
                          {'snapshot_id': snapshot_ref.id,
                           'project_id': snapshot_ref.project_id,
                           'extra_info': local_extra_info})
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'delete.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'delete.end', extra_usage_info=local_extra_info)
            except Exception as exc_msg:
                LOG.exception(_LE("Delete snapshot notification failed: %s"),
                              exc_msg, resource=snapshot_ref)

    LOG.debug("Volume usage audit completed")
Example #22
def main():
    objects.register_all()
    admin_context = context.get_admin_context()
    CONF(sys.argv[1:], project='cinder', version=version.version_string())
    logging.setup(CONF, "cinder")
    LOG = logging.getLogger("cinder")
    rpc.init(CONF)
    begin, end = utils.last_completed_audit_period()
    if CONF.start_time:
        begin = datetime.datetime.strptime(CONF.start_time,
                                           "%Y-%m-%d %H:%M:%S")
    if CONF.end_time:
        end = datetime.datetime.strptime(CONF.end_time, "%Y-%m-%d %H:%M:%S")
    begin = begin.replace(tzinfo=iso8601.Utc())
    end = end.replace(tzinfo=iso8601.Utc())
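    # Timestamps are made timezone-aware (UTC) before validating the window.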
    if not end > begin:
        msg = _("The end time (%(end)s) must be after the start "
                "time (%(start)s).") % {
                    'start': begin,
                    'end': end
                }
        LOG.error(msg)
        sys.exit(-1)
    LOG.info(_LI("Starting volume usage audit"))
    msg = _LI("Creating usages for %(begin_period)s until %(end_period)s")
    LOG.info(msg, {"begin_period": str(begin), "end_period": str(end)})

    extra_info = {
        'audit_period_beginning': str(begin),
        'audit_period_ending': str(end),
    }

    volumes = objects.VolumeList.get_active_by_window(admin_context, begin,
                                                      end)
    LOG.info(_LI("Found %d volumes"), len(volumes))
    for volume_ref in volumes:
        try:
            cinder.volume.utils.notify_about_volume_usage(
                admin_context,
                volume_ref,
                'exists',
                extra_usage_info=extra_info)
            LOG.debug(
                "Sent exists notification for <volume_id: "
                "%(volume_id)s> <project_id %(project_id)s> "
                "<%(extra_info)s>", {
                    'volume_id': volume_ref.id,
                    'project_id': volume_ref.project_id,
                    'extra_info': extra_info
                })
        except Exception as exc_msg:
            LOG.exception(_LE("Exists volume notification failed: %s"),
                          exc_msg,
                          resource=volume_ref)

        if (CONF.send_actions and volume_ref.created_at > begin
                and volume_ref.created_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(volume_ref.created_at),
                    'audit_period_ending': str(volume_ref.created_at),
                }
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'create.start',
                    extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'create.end',
                    extra_usage_info=local_extra_info)
                LOG.debug(
                    "Sent create notification for "
                    "<volume_id: %(volume_id)s> "
                    "<project_id %(project_id)s> <%(extra_info)s>", {
                        'volume_id': volume_ref.id,
                        'project_id': volume_ref.project_id,
                        'extra_info': local_extra_info
                    })
            except Exception as exc_msg:
                LOG.exception(_LE("Create volume notification failed: %s"),
                              exc_msg,
                              resource=volume_ref)

        if (CONF.send_actions and volume_ref.deleted_at
                and volume_ref.deleted_at > begin
                and volume_ref.deleted_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(volume_ref.deleted_at),
                    'audit_period_ending': str(volume_ref.deleted_at),
                }
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'delete.start',
                    extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'delete.end',
                    extra_usage_info=local_extra_info)
                LOG.debug(
                    "Sent delete notification for "
                    "<volume_id: %(volume_id)s> "
                    "<project_id %(project_id)s> <%(extra_info)s>", {
                        'volume_id': volume_ref.id,
                        'project_id': volume_ref.project_id,
                        'extra_info': local_extra_info
                    })
            except Exception as exc_msg:
                LOG.exception(_LE("Delete volume notification failed: %s"),
                              exc_msg,
                              resource=volume_ref)

    snapshots = objects.SnapshotList.get_active_by_window(
        admin_context, begin, end)
    LOG.info(_LI("Found %d snapshots"), len(snapshots))
    for snapshot_ref in snapshots:
        try:
            cinder.volume.utils.notify_about_snapshot_usage(
                admin_context, snapshot_ref, 'exists', extra_info)
            LOG.debug(
                "Sent notification for <snapshot_id: %(snapshot_id)s> "
                "<project_id %(project_id)s> <%(extra_info)s>", {
                    'snapshot_id': snapshot_ref.id,
                    'project_id': snapshot_ref.project_id,
                    'extra_info': extra_info
                })
        except Exception as exc_msg:
            LOG.exception(_LE("Exists snapshot notification failed: %s"),
                          exc_msg,
                          resource=snapshot_ref)

        if (CONF.send_actions and snapshot_ref.created_at > begin
                and snapshot_ref.created_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(snapshot_ref.created_at),
                    'audit_period_ending': str(snapshot_ref.created_at),
                }
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'create.start',
                    extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'create.end',
                    extra_usage_info=local_extra_info)
                LOG.debug(
                    "Sent create notification for "
                    "<snapshot_id: %(snapshot_id)s> "
                    "<project_id %(project_id)s> <%(extra_info)s>", {
                        'snapshot_id': snapshot_ref.id,
                        'project_id': snapshot_ref.project_id,
                        'extra_info': local_extra_info
                    })
            except Exception as exc_msg:
                LOG.exception(_LE("Create snapshot notification failed: %s"),
                              exc_msg,
                              resource=snapshot_ref)

        if (CONF.send_actions and snapshot_ref.deleted_at
                and snapshot_ref.deleted_at > begin
                and snapshot_ref.deleted_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(snapshot_ref.deleted_at),
                    'audit_period_ending': str(snapshot_ref.deleted_at),
                }
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'delete.start',
                    extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'delete.end',
                    extra_usage_info=local_extra_info)
                LOG.debug(
                    "Sent delete notification for "
                    "<snapshot_id: %(snapshot_id)s> "
                    "<project_id %(project_id)s> <%(extra_info)s>", {
                        'snapshot_id': snapshot_ref.id,
                        'project_id': snapshot_ref.project_id,
                        'extra_info': local_extra_info
                    })
            except Exception as exc_msg:
                LOG.exception(_LE("Delete snapshot notification failed: %s"),
                              exc_msg,
                              resource=snapshot_ref)

    backups = objects.BackupList.get_active_by_window(admin_context, begin,
                                                      end)

    LOG.info(_LI("Found %d backups"), len(backups))
    for backup_ref in backups:
        try:
            cinder.volume.utils.notify_about_backup_usage(
                admin_context, backup_ref, 'exists', extra_info)
            LOG.debug(
                "Sent notification for <backup_id: %(backup_id)s> "
                "<project_id %(project_id)s> <%(extra_info)s>", {
                    'backup_id': backup_ref.id,
                    'project_id': backup_ref.project_id,
                    'extra_info': extra_info
                })
        except Exception as exc_msg:
            LOG.error(_LE("Exists backups notification failed: %s"), exc_msg)

        if (CONF.send_actions and backup_ref.created_at > begin
                and backup_ref.created_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(backup_ref.created_at),
                    'audit_period_ending': str(backup_ref.created_at),
                }
                cinder.volume.utils.notify_about_backup_usage(
                    admin_context,
                    backup_ref,
                    'create.start',
                    extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_backup_usage(
                    admin_context,
                    backup_ref,
                    'create.end',
                    extra_usage_info=local_extra_info)
                LOG.debug(
                    "Sent create notification for "
                    "<backup_id: %(backup_id)s> "
                    "<project_id %(project_id)s> <%(extra_info)s>", {
                        'backup_id': backup_ref.id,
                        'project_id': backup_ref.project_id,
                        'extra_info': local_extra_info
                    })
            except Exception as exc_msg:
                LOG.error(_LE("Create backup notification failed: %s"),
                          exc_msg)

        if (CONF.send_actions and backup_ref.deleted_at
                and backup_ref.deleted_at > begin
                and backup_ref.deleted_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(backup_ref.deleted_at),
                    'audit_period_ending': str(backup_ref.deleted_at),
                }
                cinder.volume.utils.notify_about_backup_usage(
                    admin_context,
                    backup_ref,
                    'delete.start',
                    extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_backup_usage(
                    admin_context,
                    backup_ref,
                    'delete.end',
                    extra_usage_info=local_extra_info)
                LOG.debug(
                    "Sent delete notification for "
                    "<backup_id: %(backup_id)s> "
                    "<project_id %(project_id)s> <%(extra_info)s>", {
                        'backup_id': backup_ref.id,
                        'project_id': backup_ref.project_id,
                        'extra_info': local_extra_info
                    })
            except Exception as exc_msg:
                LOG.error(_LE("Delete backup notification failed: %s"),
                          exc_msg)

    LOG.info(_LI("Volume usage audit completed"))
Example #23
###################################################################
# WARNING!
#
# Do not edit this file directly. This file should be generated by
# running the command "tox -e genopts" any time a config option
# has been added, changed, or removed.
###################################################################

import itertools

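# Register versioned objects before importing the option modules below.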
from cinder import objects
objects.register_all()

from cinder.api import common as cinder_api_common
from cinder.api.contrib import types_extra_specs as \
    cinder_api_contrib_typesextraspecs
from cinder.api.middleware import auth as cinder_api_middleware_auth
from cinder.api.views import versions as cinder_api_views_versions
from cinder.backup import api as cinder_backup_api
from cinder.backup import chunkeddriver as cinder_backup_chunkeddriver
from cinder.backup import driver as cinder_backup_driver
from cinder.backup.drivers import ceph as cinder_backup_drivers_ceph
from cinder.backup.drivers import glusterfs as cinder_backup_drivers_glusterfs
from cinder.backup.drivers import google as cinder_backup_drivers_google
from cinder.backup.drivers import nfs as cinder_backup_drivers_nfs
from cinder.backup.drivers import posix as cinder_backup_drivers_posix
from cinder.backup.drivers import swift as cinder_backup_drivers_swift
Example #24
    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()

        # Import cinder objects for test cases
        objects.register_all()

        # Unit tests do not need to use lazy gettext
        i18n.enable_lazy(False)

        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        environ_enabled = (lambda var_name:
                           strutils.bool_from_string(os.environ.get(var_name)))
        if environ_enabled('OS_STDOUT_CAPTURE'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if environ_enabled('OS_STDERR_CAPTURE'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
        if environ_enabled('OS_LOG_CAPTURE'):
            log_format = '%(levelname)s [%(name)s] %(message)s'
            if environ_enabled('OS_DEBUG'):
                level = logging.DEBUG
            else:
                level = logging.INFO
            self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
                                                   format=log_format,
                                                   level=level))

        rpc.add_extra_exmods("cinder.tests.unit")
        self.addCleanup(rpc.clear_extra_exmods)
        self.addCleanup(rpc.cleanup)

        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = 'fake'
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)
        rpc.init(CONF)

        conf_fixture.set_defaults(CONF)
        CONF([], default_config_files=[])

        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = timeutils.utcnow()

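        # Unit tests run against an in-memory SQLite database ('sqlite://')
        # with synchronous writes disabled.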
        CONF.set_default('connection', 'sqlite://', 'database')
        CONF.set_default('sqlite_synchronous', False, 'database')

        global _DB_CACHE
        if not _DB_CACHE:
            _DB_CACHE = Database(sqla_api, migration,
                                 sql_connection=CONF.database.connection,
                                 sqlite_db=CONF.database.sqlite_db,
                                 sqlite_clean_db=CONF.sqlite_clean_db)
        self.useFixture(_DB_CACHE)

        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        self.mox = mox.Mox()
        self.stubs = stubout.StubOutForTesting()
        self.addCleanup(CONF.reset)
        self.addCleanup(self.mox.UnsetStubs)
        self.addCleanup(self.stubs.UnsetAll)
        self.addCleanup(self.stubs.SmartUnsetAll)
        self.addCleanup(self.mox.VerifyAll)
        self.addCleanup(self._common_cleanup)
        self.injected = []
        self._services = []

        fake_notifier.stub_notifier(self.stubs)

        self.override_config('fatal_exception_format_errors', True)
        # This will be cleaned up by the NestedTempfile fixture
        lock_path = self.useFixture(fixtures.TempDir()).path
        self.fixture = self.useFixture(
            config_fixture.Config(lockutils.CONF))
        self.fixture.config(lock_path=lock_path,
                            group='oslo_concurrency')
        lockutils.set_defaults(lock_path)
        self.override_config('policy_file',
                             os.path.join(
                                 os.path.abspath(
                                     os.path.join(
                                         os.path.dirname(__file__),
                                         '..',
                                     )
                                 ),
                                 'cinder/tests/unit/policy.json'))
Example #25
"""Generate list of Cinder drivers"""

import argparse
import operator
import os
import json
import textwrap

from cinder.interface import util
from cinder import objects


# Object loading can cause issues loading drivers, force it up front
objects.register_all()


parser = argparse.ArgumentParser(prog="generate_driver_list")

parser.add_argument("--format", default='str', choices=['str', 'dict'],
                    help="Output format type")

# Keep backwards compatibility with the gate-docs test
# The tests pass ['docs'] on the cmdln, but it's never been used.
parser.add_argument("output_list", default=None, nargs='?')

CI_WIKI_ROOT = "https://wiki.openstack.org/wiki/ThirdPartySystems/"


class Output(object):
Example #26
    def setUp(self):
        super(VolumeGlanceMetadataTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        objects.register_all()
Example #27
    def setUp(self):
        super(VolumeGlanceMetadataTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        objects.register_all()