Example #1
    def wrapped_func(*args, **kwarg):
        if fn.__name__ == 'create':
            LOG = logging.getLogger(fn.__module__)
            try:
                idx_svol = inspect.getargspec(fn).args.index("source_volume")
                idx_voltp = inspect.getargspec(fn).args.index("volume_type")
            except Exception as e:
                LOG.error(_("cinder volume_create interface has been changed. "
                            "PowerVC monkey patch for volume clone won't work."
                            " function: %(fx)s, arguments: %(args)s") %
                          dict(fx=fn.__module__,
                               args=inspect.getargspec(fn).args))
                raise e

            svol = kwarg.get('source_volume')
            voltp = kwarg.get('volume_type')

            if svol and voltp:
                if svol['volume_type_id'] != voltp['id']:
                    # this is the condition that will trigger the
                    # volume clone to fail. Patch it here.
                    svol['volume_type_id'] = voltp['id']
                    LOG.info(_("Monkey patched volume clone by paxes "
                               "volume_create_decorator(). Source Volume ID: "
                               "%(svol)s, volume type name: %(voltpnm)s, "
                               "volume type ID: %(voltpid)s") %
                             dict(svol=svol['id'],
                                  voltpnm=voltp['name'],
                                  voltpid=voltp['id']))
                    return fn(*args, **kwarg)

        return fn(*args, **kwarg)
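
For context, wrapped_func in Example #1 is the inner function of a monkey-patching decorator; the log message names it volume_create_decorator(). A minimal sketch of the enclosing decorator shape, assuming the usual closure-over-fn pattern (the body below is illustrative, not the PowerVC implementation):

import functools
import logging


def volume_create_decorator(fn):
    # Hypothetical outer decorator: the real one closes over fn and contains
    # the wrapped_func body shown in Example #1.
    @functools.wraps(fn)
    def wrapped_func(*args, **kwargs):
        log = logging.getLogger(fn.__module__)
        log.debug("calling %s", fn.__name__)
        return fn(*args, **kwargs)
    return wrapped_func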
Example #2
    def __init__(self, name, app, host=None, port=None, pool_size=None,
                 protocol=eventlet.wsgi.HttpProtocol, backlog=128):
        """Initialize, but do not start, a WSGI server.

        :param name: Pretty name for logging.
        :param app: The WSGI application to serve.
        :param host: IP address to serve the application.
        :param port: Port number to serve the application.
        :param pool_size: Maximum number of eventlets to spawn concurrently.
        :returns: None

        """
        # Allow operators to customize http requests max header line size.
        eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
        self.name = name
        self.app = app
        self._host = host or "0.0.0.0"
        self._port = port or 0
        self._server = None
        self._socket = None
        self._protocol = protocol
        self._pool = eventlet.GreenPool(pool_size or self.default_pool_size)
        self._logger = logging.getLogger("eventlet.wsgi.server")
        self._wsgi_logger = logging.WritableLogger(self._logger)

        if backlog < 1:
            raise exception.InvalidInput(
                reason='The backlog must be more than 1')
        self._socket = self._get_socket(self._host,
                                        self._port,
                                        backlog=backlog)
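
Example #2's constructor only prepares the pieces (listening socket, green pool, logger); serving starts later. A self-contained sketch of what those pieces combine into when driven with the eventlet API directly (the app, address, and pool size are placeholders):

import eventlet
import eventlet.wsgi


def app(environ, start_response):
    # Trivial WSGI application, used only for illustration.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello\n']

sock = eventlet.listen(('0.0.0.0', 8080), backlog=128)
pool = eventlet.GreenPool(1000)
eventlet.wsgi.server(sock, app, custom_pool=pool)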
Example #3
def notify(_context, message):
    """Notifies the recipient of the desired event given the model.
    Log notifications using openstack's default logging system"""

    priority = message.get("priority", CONF.default_notification_level)
    priority = priority.lower()
    logger = logging.getLogger("openstack.common.notification.%s" % message["event_type"])
    getattr(logger, priority)(json.dumps(message))
Example #4
def notify(message):
    """Notifies the recipient of the desired event given the model.
    Log notifications using cinder's default logging system"""

    priority = message.get('priority',
                           FLAGS.default_notification_level)
    priority = priority.lower()
    logger = logging.getLogger(
            'cinder.notification.%s' % message['event_type'])
    getattr(logger, priority)(jsonutils.dumps(message))
Example #5
def notify(_context, message):
    """Notifies the recipient of the desired event given the model.

    Log notifications using OpenStack's default logging system.
    """

    priority = message.get('priority',
                           CONF.default_notification_level)
    priority = priority.lower()
    logger = logging.getLogger(
        'cinder.openstack.common.notification.%s' %
        message['event_type'])
    getattr(logger, priority)(jsonutils.dumps(message))
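
Examples #3-#5 are variants of the same log-based notifier: the message priority selects the logger method by name, and the whole message is logged as JSON. A self-contained sketch of that dispatch (the message fields beyond event_type and priority are illustrative):

import json
import logging

logging.basicConfig(level=logging.DEBUG)

message = {'event_type': 'volume.create.end',
           'priority': 'INFO',
           'payload': {'volume_id': 'vol-123'}}

logger = logging.getLogger('cinder.notification.%s' % message['event_type'])
priority = message.get('priority', 'INFO').lower()
getattr(logger, priority)(json.dumps(message))  # calls logger.info(...)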
Example #6
    def setUp(self):
        super(DriverTestCase, self).setUp()
        self.flags(volume_driver=self.driver_name,
                   logging_default_format_string="%(message)s")
        self.volume = importutils.import_object(FLAGS.volume_manager)
        self.context = context.get_admin_context()
        self.output = ""

        def _fake_execute(_command, *_args, **_kwargs):
            """Fake _execute."""
            return self.output, None
        self.volume.driver.set_execute(_fake_execute)

        log = logging.getLogger()
        self.stream = cStringIO.StringIO()
        log.logger.addHandler(logging.logging.StreamHandler(self.stream))
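
Example #6 captures driver log output by attaching a StreamHandler backed by an in-memory buffer, so tests can assert on what was logged. The same trick with plain stdlib logging (io.StringIO in place of cStringIO; the logger name is an arbitrary example):

import io
import logging

stream = io.StringIO()
log = logging.getLogger('cinder.volume.driver.test')
log.addHandler(logging.StreamHandler(stream))
log.setLevel(logging.INFO)

log.info('volume created')
assert 'volume created' in stream.getvalue()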
Example #7
    def test_error_notification(self):
        self.stubs.Set(cinder.flags.FLAGS, 'notification_driver',
            'cinder.notifier.rabbit_notifier')
        self.stubs.Set(cinder.flags.FLAGS, 'publish_errors', True)
        LOG = logging.getLogger('cinder')
        logging.setup("cinder")
        msgs = []

        def mock_notify(context, topic, data):
            msgs.append(data)

        self.stubs.Set(cinder.openstack.common.rpc, 'notify', mock_notify)
        LOG.error('foo')
        self.assertEqual(1, len(msgs))
        msg = msgs[0]
        self.assertEqual(msg['event_type'], 'error_notification')
        self.assertEqual(msg['priority'], 'ERROR')
        self.assertEqual(msg['payload']['error'], 'foo')
Example #8
    def wrapped_func(*args, **kwarg):
        if fn.__name__ == 'create':
            # cinder.volume.volume_types.create() decorator
            r = fn(*args, **kwarg)
            LOG = logging.getLogger(fn.__module__)
            try:
                idx_specs = inspect.getargspec(fn).args.index('extra_specs')
                idx_name = inspect.getargspec(fn).args.index('name')
                idx_ctxt = inspect.getargspec(fn).args.index('context')
            except Exception as e:
                LOG.warn(_("Failed to get the parameters from function "
                           "cinder.volume.volume_types.create(). Default "
                           "quota didn't set for the storage template. "
                           "Error: %(err)s") % dict(err=e))
                # Just return. Don't set the storage template default quota.
                return r

            volume_type = args[idx_name]
            extra_specs = args[idx_specs]
            ctxt = args[idx_ctxt]
            volume_host = None
            if extra_specs and isinstance(extra_specs, dict):
                volume_host = extra_specs.get(
                    "capabilities:volume_backend_name", None)

            if volume_host and volume_type and ctxt:
                volume_rpcapi = volume_rpc.VolumeAPIProduct()
                try:
                    volume_rpcapi.set_volume_type_quota(
                        ctxt, volume_host, volume_type)
                    LOG.info(_("Successfully set default quota for storage "
                               "template %(vol_type)s") %
                             dict(vol_type=volume_type))
                except Exception as e:
                    LOG.warn(_("Failed to set default quota for storage "
                               "template %(vol_type)s, error: %(err)s") %
                             dict(vol_type=volume_type, err=e))
            else:
                LOG.warn(_("Cannot set default quota for storage template "
                           "%(vol_type)s due to invalid Parameters from volume "
                           "type create.") % dict(vol_type=volume_type))
            return r
        else:
            return fn(*args, **kwarg)
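
Examples #1 and #8 both use inspect.getargspec() to locate the wrapped function's positional parameters by name instead of hard-coding their positions. A minimal standalone illustration (getargspec() was removed in Python 3.11, where getfullargspec() replaces it; the create() signature below is a stand-in, not cinder's):

import inspect


def create(context, name, extra_specs=None):
    return name

idx_name = inspect.getargspec(create).args.index('name')  # -> 1

args = ('ctx', 'gold-tier', {'capabilities:volume_backend_name': 'backend1'})
print(args[idx_name])  # prints 'gold-tier'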
Example #9
File: all.py Project: Qeas/cinder
def main():
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup("cinder")
    LOG = logging.getLogger('cinder.all')

    utils.monkey_patch()
    launcher = service.process_launcher()
    # cinder-api
    try:
        server = service.WSGIService('osapi_volume')
        launcher.launch_service(server, workers=server.workers or 1)
    except (Exception, SystemExit):
        LOG.exception(_LE('Failed to load osapi_volume'))

    for binary in ['cinder-volume', 'cinder-scheduler', 'cinder-backup']:
        try:
            launcher.launch_service(service.Service.create(binary=binary))
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s'), binary)
    launcher.wait()
Example #10
    def __init__(self, name, app, host=None, port=None, pool_size=None, protocol=eventlet.wsgi.HttpProtocol):
        """Initialize, but do not start, a WSGI server.

        :param name: Pretty name for logging.
        :param app: The WSGI application to serve.
        :param host: IP address to serve the application.
        :param port: Port number to serve the application.
        :param pool_size: Maximum number of eventlets to spawn concurrently.
        :returns: None

        """
        self.name = name
        self.app = app
        self._host = host or "0.0.0.0"
        self._port = port or 0
        self._server = None
        self._socket = None
        self._protocol = protocol
        self._pool = eventlet.GreenPool(pool_size or self.default_pool_size)
        self._logger = logging.getLogger("eventlet.wsgi.server")
        self._wsgi_logger = logging.WritableLogger(self._logger)
Example #11
    def test_create_consistencygroup_exceptions(self):
        with mock.patch.object(filter_scheduler.FilterScheduler,
                               'schedule_create_consistencygroup') as mock_cg:
            original_driver = self.manager.driver
            self.manager.driver = filter_scheduler.FilterScheduler
            LOG = logging.getLogger('cinder.scheduler.manager')
            self.stubs.Set(LOG, 'error', mock.Mock())
            self.stubs.Set(LOG, 'exception', mock.Mock())
            self.stubs.Set(db, 'consistencygroup_update', mock.Mock())

            ex = exception.CinderException('test')
            mock_cg.side_effect = ex
            group_id = '1'
            self.assertRaises(exception.CinderException,
                              self.manager.create_consistencygroup,
                              self.context,
                              'volume',
                              group_id)
            LOG.exception.assert_called_once_with(_(
                "Failed to create consistency group "
                "%(group_id)s."), {'group_id': group_id})
            db.consistencygroup_update.assert_called_once_with(
                self.context, group_id, {'status': 'error'})

            mock_cg.reset_mock()
            LOG.exception.reset_mock()
            db.consistencygroup_update.reset_mock()

            mock_cg.side_effect = exception.NoValidHost(
                reason="No weighed hosts available")
            self.manager.create_consistencygroup(
                self.context, 'volume', group_id)
            LOG.error.assert_called_once_with(_(
                "Could not find a host for consistency group "
                "%(group_id)s.") % {'group_id': group_id})
            db.consistencygroup_update.assert_called_once_with(
                self.context, group_id, {'status': 'error'})

            self.manager.driver = original_driver
Example #12
    def setUp(self):
        super(NotifierListTestCase, self).setUp()
        list_notifier._reset_drivers()
        # Mock log to add one to exception_count when log.exception is called

        def mock_exception(cls, *args):
            self.exception_count += 1

        self.exception_count = 0
        list_notifier_log = logging.getLogger('cinder.notifier.list_notifier')
        self.stubs.Set(list_notifier_log, "exception", mock_exception)
        # Mock no_op notifier to add one to notify_count when called.

        def mock_notify(cls, *args):
            self.notify_count += 1

        self.notify_count = 0
        self.stubs.Set(cinder.notifier.no_op_notifier, 'notify', mock_notify)
        # Mock log_notifier to raise RuntimeError when called.

        def mock_notify2(cls, *args):
            raise RuntimeError("Bad notifier.")

        self.stubs.Set(cinder.notifier.log_notifier, 'notify', mock_notify2)
Example #13
    def log_level(self, level):
        """Set logging level to the specified value."""
        log_root = logging.getLogger(None).logger
        log_root.setLevel(level)
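
Example #13 reaches through what appears to be an oslo-log style adapter (the .logger attribute) to the underlying stdlib logger. With plain stdlib logging the same operation reduces to:

import logging

# getLogger(None) (or getLogger() with no name) returns the root logger.
logging.getLogger(None).setLevel(logging.DEBUG)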
Example #14
    def __init__(self,
                 name,
                 app,
                 host=None,
                 port=None,
                 pool_size=None,
                 protocol=eventlet.wsgi.HttpProtocol,
                 backlog=128):
        """Initialize, but do not start, a WSGI server.

        :param name: Pretty name for logging.
        :param app: The WSGI application to serve.
        :param host: IP address to serve the application.
        :param port: Port number to serve the application.
        :param pool_size: Maximum number of eventlets to spawn concurrently.
        :returns: None

        """
        # Allow operators to customize http requests max header line size.
        eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
        self.name = name
        self.app = app
        self._host = host or "0.0.0.0"
        self._port = port or 0
        self._server = None
        self._socket = None
        self._protocol = protocol
        self.pool_size = pool_size or self.default_pool_size
        self._pool = eventlet.GreenPool(self.pool_size)
        self._logger = logging.getLogger("eventlet.wsgi.server")
        self._wsgi_logger = logging.WritableLogger(self._logger)

        if backlog < 1:
            raise exception.InvalidInput(
                reason='The backlog must be more than 1')

        bind_addr = (host, port)
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        try:
            info = socket.getaddrinfo(bind_addr[0], bind_addr[1],
                                      socket.AF_UNSPEC, socket.SOCK_STREAM)[0]
            family = info[0]
            bind_addr = info[-1]
        except Exception:
            family = socket.AF_INET

        cert_file = CONF.ssl_cert_file
        key_file = CONF.ssl_key_file
        ca_file = CONF.ssl_ca_file
        self._use_ssl = cert_file or key_file

        if cert_file and not os.path.exists(cert_file):
            raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)

        if ca_file and not os.path.exists(ca_file):
            raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)

        if key_file and not os.path.exists(key_file):
            raise RuntimeError(_("Unable to find key_file : %s") % key_file)

        if self._use_ssl and (not cert_file or not key_file):
            raise RuntimeError(
                _("When running server in SSL mode, you "
                  "must specify both a cert_file and "
                  "key_file option value in your "
                  "configuration file."))

        retry_until = time.time() + 30
        while not self._socket and time.time() < retry_until:
            try:
                self._socket = eventlet.listen(bind_addr,
                                               backlog=backlog,
                                               family=family)
            except socket.error as err:
                if err.args[0] != errno.EADDRINUSE:
                    raise
                eventlet.sleep(0.1)

        if not self._socket:
            raise RuntimeError(
                _("Could not bind to %(host)s:%(port)s "
                  "after trying for 30 seconds") % {
                      'host': host,
                      'port': port
                  })

        (self._host, self._port) = self._socket.getsockname()[0:2]
        LOG.info(
            _("%(name)s listening on %(_host)s:%(_port)s") % self.__dict__)
Example #15
def main():
    admin_context = context.get_admin_context()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup("cinder")
    LOG = logging.getLogger("cinder")
    rpc.init(CONF)
    begin, end = utils.last_completed_audit_period()
    if CONF.start_time:
        begin = datetime.strptime(CONF.start_time, "%Y-%m-%d %H:%M:%S")
    if CONF.end_time:
        end = datetime.strptime(CONF.end_time, "%Y-%m-%d %H:%M:%S")
    if not end > begin:
        msg = _("The end time (%(end)s) must be after the start "
                "time (%(start)s).") % {'start': begin,
                                        'end': end}
        print(msg)
        LOG.error(msg)
        sys.exit(-1)
    print(_("Starting volume usage audit"))
    msg = _("Creating usages for %(begin_period)s until %(end_period)s")
    print(msg % {"begin_period": str(begin), "end_period": str(end)})

    extra_info = {
        'audit_period_beginning': str(begin),
        'audit_period_ending': str(end),
    }

    volumes = db.volume_get_active_by_window(admin_context,
                                             begin,
                                             end)
    print(_("Found %d volumes") % len(volumes))
    for volume_ref in volumes:
        try:
            LOG.debug("Send exists notification for <volume_id: "
                      "%(volume_id)s> <project_id %(project_id)s> "
                      "<%(extra_info)s>" %
                      {'volume_id': volume_ref.id,
                       'project_id': volume_ref.project_id,
                       'extra_info': extra_info})
            cinder.volume.utils.notify_about_volume_usage(
                admin_context,
                volume_ref,
                'exists', extra_usage_info=extra_info)
        except Exception as e:
            LOG.error(_LE("Failed to send exists notification"
                          " for volume %s.") %
                      volume_ref.id)
            print(traceback.format_exc(e))

        if (CONF.send_actions and
                volume_ref.created_at > begin and
                volume_ref.created_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(volume_ref.created_at),
                    'audit_period_ending': str(volume_ref.created_at),
                }
                LOG.debug("Send create notification for "
                          "<volume_id: %(volume_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>" %
                          {'volume_id': volume_ref.id,
                           'project_id': volume_ref.project_id,
                           'extra_info': local_extra_info})
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'create.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'create.end', extra_usage_info=local_extra_info)
            except Exception as e:
                LOG.error(_LE("Failed to send create notification for "
                              "volume %s.") % volume_ref.id)
                print(traceback.format_exc(e))

        if (CONF.send_actions and volume_ref.deleted_at and
                volume_ref.deleted_at > begin and
                volume_ref.deleted_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(volume_ref.deleted_at),
                    'audit_period_ending': str(volume_ref.deleted_at),
                }
                LOG.debug("Send delete notification for "
                          "<volume_id: %(volume_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>" %
                          {'volume_id': volume_ref.id,
                           'project_id': volume_ref.project_id,
                           'extra_info': local_extra_info})
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'delete.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_volume_usage(
                    admin_context,
                    volume_ref,
                    'delete.end', extra_usage_info=local_extra_info)
            except Exception as e:
                LOG.error(_LE("Failed to send delete notification for volume "
                              "%s.") % volume_ref.id)
                print(traceback.format_exc(e))

    snapshots = db.snapshot_get_active_by_window(admin_context,
                                                 begin,
                                                 end)
    print(_("Found %d snapshots") % len(snapshots))
    for snapshot_ref in snapshots:
        try:
            LOG.debug("Send notification for <snapshot_id: %(snapshot_id)s> "
                      "<project_id %(project_id)s> <%(extra_info)s>" %
                      {'snapshot_id': snapshot_ref.id,
                       'project_id': snapshot_ref.project_id,
                       'extra_info': extra_info})
            cinder.volume.utils.notify_about_snapshot_usage(admin_context,
                                                            snapshot_ref,
                                                            'exists',
                                                            extra_info)
        except Exception as e:
            LOG.error(_LE("Failed to send exists notification "
                          "for snapshot %s.")
                      % snapshot_ref.id)
            print(traceback.format_exc(e))

        if (CONF.send_actions and
                snapshot_ref.created_at > begin and
                snapshot_ref.created_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(snapshot_ref.created_at),
                    'audit_period_ending': str(snapshot_ref.created_at),
                }
                LOG.debug("Send create notification for "
                          "<snapshot_id: %(snapshot_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>" %
                          {'snapshot_id': snapshot_ref.id,
                           'project_id': snapshot_ref.project_id,
                           'extra_info': local_extra_info})
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'create.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'create.end', extra_usage_info=local_extra_info)
            except Exception as e:
                LOG.error(_LE("Failed to send create notification for snapshot"
                              "%s.") % snapshot_ref.id)
                print(traceback.format_exc(e))

        if (CONF.send_actions and snapshot_ref.deleted_at and
                snapshot_ref.deleted_at > begin and
                snapshot_ref.deleted_at < end):
            try:
                local_extra_info = {
                    'audit_period_beginning': str(snapshot_ref.deleted_at),
                    'audit_period_ending': str(snapshot_ref.deleted_at),
                }
                LOG.debug("Send delete notification for "
                          "<snapshot_id: %(snapshot_id)s> "
                          "<project_id %(project_id)s> <%(extra_info)s>" %
                          {'snapshot_id': snapshot_ref.id,
                           'project_id': snapshot_ref.project_id,
                           'extra_info': local_extra_info})
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'delete.start', extra_usage_info=local_extra_info)
                cinder.volume.utils.notify_about_snapshot_usage(
                    admin_context,
                    snapshot_ref,
                    'delete.end', extra_usage_info=local_extra_info)
            except Exception as e:
                LOG.error(_LE("Failed to send delete notification for snapshot"
                              "%s.") % snapshot_ref.id)
                print(traceback.format_exc(e))

    print(_("Volume usage audit completed"))
Example #16
    def __init__(self, name, app, host=None, port=None, pool_size=None,
                 protocol=eventlet.wsgi.HttpProtocol, backlog=128):
        """Initialize, but do not start, a WSGI server.

        :param name: Pretty name for logging.
        :param app: The WSGI application to serve.
        :param host: IP address to serve the application.
        :param port: Port number to serve the application.
        :param pool_size: Maximum number of eventlets to spawn concurrently.
        :returns: None

        """
        # Allow operators to customize http requests max header line size.
        eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
        self.name = name
        self.app = app
        self._host = host or "0.0.0.0"
        self._port = port or 0
        self._server = None
        self._socket = None
        self._protocol = protocol
        self.pool_size = pool_size or self.default_pool_size
        self._pool = eventlet.GreenPool(self.pool_size)
        self._logger = logging.getLogger("eventlet.wsgi.server")
        self._wsgi_logger = logging.WritableLogger(self._logger)

        if backlog < 1:
            raise exception.InvalidInput(
                reason='The backlog must be more than 1')

        bind_addr = (host, port)
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        try:
            info = socket.getaddrinfo(bind_addr[0],
                                      bind_addr[1],
                                      socket.AF_UNSPEC,
                                      socket.SOCK_STREAM)[0]
            family = info[0]
            bind_addr = info[-1]
        except Exception:
            family = socket.AF_INET

        cert_file = CONF.ssl_cert_file
        key_file = CONF.ssl_key_file
        ca_file = CONF.ssl_ca_file
        self._use_ssl = cert_file or key_file

        if cert_file and not os.path.exists(cert_file):
            raise RuntimeError(_("Unable to find cert_file : %s")
                               % cert_file)

        if ca_file and not os.path.exists(ca_file):
            raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)

        if key_file and not os.path.exists(key_file):
            raise RuntimeError(_("Unable to find key_file : %s")
                               % key_file)

        if self._use_ssl and (not cert_file or not key_file):
            raise RuntimeError(_("When running server in SSL mode, you "
                                 "must specify both a cert_file and "
                                 "key_file option value in your "
                                 "configuration file."))

        retry_until = time.time() + 30
        while not self._socket and time.time() < retry_until:
            try:
                self._socket = eventlet.listen(bind_addr, backlog=backlog,
                                               family=family)
            except socket.error as err:
                if err.args[0] != errno.EADDRINUSE:
                    raise
                eventlet.sleep(0.1)

        if not self._socket:
            raise RuntimeError(_("Could not bind to %(host)s:%(port)s "
                               "after trying for 30 seconds") %
                               {'host': host, 'port': port})

        (self._host, self._port) = self._socket.getsockname()[0:2]
        LOG.info(_("%(name)s listening on %(_host)s:%(_port)s") %
                 {'name': self.name, '_host': self._host, '_port': self._port})
Example #17
import os
import subprocess
import uuid

from migrate.versioning import repository
import six.moves.urllib.parse as urlparse
import sqlalchemy
import testtools

import cinder.db.migration as migration
import cinder.db.sqlalchemy.migrate_repo
from cinder.db.sqlalchemy.migration import versioning_api as migration_api
from cinder.openstack.common import log as logging
from cinder import test

LOG = logging.getLogger('cinder.tests.test_migrations')


def _get_connect_string(backend,
                        user="******",
                        passwd="openstack_citest",
                        database="openstack_citest"):
    """Return connect string.

    Try to get a connection with a very specific set of values, if we get
    these then we'll run the tests, otherwise they are skipped.
    """
    if backend == "postgres":
        backend = "postgresql+psycopg2"

    return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" %
Example #18
#    under the License.
"""
:mod:`nexenta.jsonrpc` -- Nexenta-specific JSON RPC client
=====================================================================

.. automodule:: nexenta.jsonrpc
.. moduleauthor:: Yuriy Taraday <*****@*****.**>
"""

import urllib2

from cinder.openstack.common import jsonutils
from cinder.volume import nexenta
from cinder.openstack.common import log as logging

LOG = logging.getLogger("cinder.volume.nexenta.jsonrpc")


class NexentaJSONException(nexenta.NexentaException):
    pass


class NexentaJSONProxy(object):
    def __init__(self, url, user, password, auto=False, obj=None, method=None):
        self.url = url
        self.user = user
        self.password = password
        self.auto = auto
        self.obj = obj
        self.method = method
Example #19
#    under the License.
"""
:mod:`nexenta.jsonrpc` -- Nexenta-specific JSON RPC client
=====================================================================

.. automodule:: nexenta.jsonrpc
.. moduleauthor:: Yuriy Taraday <*****@*****.**>
"""

import json
import urllib2

from cinder.volume import nexenta
from cinder.openstack.common import log as logging

LOG = logging.getLogger("cinder.volume.nexenta.jsonrpc")


class NexentaJSONException(nexenta.NexentaException):
    pass


class NexentaJSONProxy(object):
    def __init__(self, url, user, password, auto=False, obj=None, method=None):
        self.url = url
        self.user = user
        self.password = password
        self.auto = auto
        self.obj = obj
        self.method = method
Example #20
    pass

from cinder import db, quota, exception
from cinder.image import glance
from cinder.openstack.common import excutils
from cinder.openstack.common import timeutils, log as logging
from cinder.volume import volume_types
from cinder.context import get_admin_context

from lunr.cinder.client import LunrClient, LunrError
from lunr.cinder.utils import initialize_connection, terminate_connection, \
        attach_volume, detach_volume
from lunr.cinder.flags import CONF


LOG = logging.getLogger('cinder.lunr.lunrrpc')
QUOTAS = quota.QUOTAS


class LunrRPC(object):

    def __init__(self):
        pass

    def create_consumer(self, *args, **kwargs):
        pass

    def consume_in_thread(self, *args, **kwargs):
        pass

    def _get_volume_type_id(self, volume_type_name):
Example #21
"""
import os
import sys

from cinder import exception
from cinder import flags
from cinder.openstack.common import cfg
from cinder.openstack.common import log as logging
from cinder.volume import driver

# Check needed for unit testing on Unix
if os.name == 'nt':
    import wmi

LOG = logging.getLogger("cinder.volume.windows.volume")

FLAGS = flags.FLAGS

windows_opts = [
    cfg.StrOpt('windows_iscsi_lun_path',
               default='C:\iSCSIVirtualDisks',
               help='Path to store VHD backed volumes'),
]

FLAGS.register_opts(windows_opts)


class WindowsDriver(driver.ISCSIDriver):
    """Executes volume driver commands on Windows Storage server."""
    def __init__(self):
Example #22
"""
import os
import sys

from cinder import exception
from cinder import flags
from cinder.openstack.common import cfg
from cinder.openstack.common import log as logging
from cinder.volume import driver

# Check needed for unit testing on Unix
if os.name == 'nt':
    import wmi


LOG = logging.getLogger("cinder.volume.windows.volume")

FLAGS = flags.FLAGS

windows_opts = [
    cfg.StrOpt('windows_iscsi_lun_path',
              default='C:\iSCSIVirtualDisks',
              help='Path to store VHD backed volumes'),
]

FLAGS.register_opts(windows_opts)


class WindowsDriver(driver.ISCSIDriver):
    """Executes volume driver commands on Windows Storage server."""
Example #23
This driver requires VPSA with API ver.12.06 or higher.
"""

import httplib

from cinder import exception
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import cfg
from cinder import utils
from cinder.volume import driver
from cinder.volume import iscsi

from lxml import etree

LOG = logging.getLogger("cinder.volume.driver")

zadara_opts = [
    cfg.StrOpt('zadara_vpsa_ip',
               default=None,
               help='Management IP of Zadara VPSA'),
    cfg.StrOpt('zadara_vpsa_port',
               default=None,
               help='Zadara VPSA port number'),
    cfg.BoolOpt('zadara_vpsa_use_ssl',
                default=False,
                help='Use SSL connection'),
    cfg.StrOpt('zadara_user', default=None, help='User name for the VPSA'),
    cfg.StrOpt('zadara_password', default=None, help='Password for the VPSA'),
    cfg.StrOpt('zadara_vpsa_poolname',
               default=None,
Example #24
import os
import subprocess
import urlparse
import uuid

from migrate.versioning import repository
import sqlalchemy
import testtools

import cinder.db.migration as migration
import cinder.db.sqlalchemy.migrate_repo
from cinder.db.sqlalchemy.migration import versioning_api as migration_api
from cinder.openstack.common import log as logging
from cinder import test

LOG = logging.getLogger('cinder.tests.test_migrations')


def _get_connect_string(backend,
                        user="******",
                        passwd="openstack_citest",
                        database="openstack_citest"):
    """Return connect string.

    Try to get a connection with a very specific set of values, if we get
    these then we'll run the tests, otherwise they are skipped.
    """
    if backend == "postgres":
        backend = "postgresql+psycopg2"

    return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" %
Example #25
from cinder import flags
from cinder.openstack.common import cfg
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder.volume.san import san

ibm_xiv_opts = [
    cfg.StrOpt('xiv_proxy',
               default='xiv_openstack.nova_proxy.XIVNovaProxy',
               help='Proxy driver'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(ibm_xiv_opts)

LOG = logging.getLogger('cinder.volume.xiv')


class XIVDriver(san.SanISCSIDriver):
    """IBM XIV volume driver."""

    def __init__(self, *args, **kwargs):
        """Initialize the driver."""

        proxy = importutils.import_class(FLAGS.xiv_proxy)

        self.xiv_proxy = proxy({
                "xiv_user": FLAGS.san_login,
                "xiv_pass": FLAGS.san_password,
                "xiv_address": FLAGS.san_ip,
                "xiv_vol_pool": FLAGS.san_clustername
Example #26
import mock
import tempfile

from oslo.config import cfg

from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import test

CONF = cfg.CONF

LOG = logging.getLogger(__name__)


class FakeBackupException(Exception):
    pass


class BackupTestCase(test.TestCase):
    """Test Case for backups."""
    def setUp(self):
        super(BackupTestCase, self).setUp()
        vol_tmpdir = tempfile.mkdtemp()
        self.flags(volumes_dir=vol_tmpdir)
        self.backup_mgr = \
            importutils.import_object(CONF.backup_manager)
        self.backup_mgr.host = 'testhost'
Example #27
:mod:`nexenta.volume` -- Driver to store volumes on Nexenta Appliance
=====================================================================

.. automodule:: nexenta.volume
.. moduleauthor:: Yuriy Taraday <*****@*****.**>
"""

from cinder import exception
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import cfg
from cinder.volume import driver
from cinder.volume import nexenta
from cinder.volume.nexenta import jsonrpc

LOG = logging.getLogger("cinder.volume.nexenta.volume")
FLAGS = flags.FLAGS

nexenta_opts = [
    cfg.StrOpt('nexenta_host',
              default='',
              help='IP address of Nexenta SA'),
    cfg.IntOpt('nexenta_rest_port',
               default=2000,
               help='HTTP port to connect to Nexenta REST API server'),
    cfg.StrOpt('nexenta_rest_protocol',
               default='auto',
               help='Use http or https for REST connection (default auto)'),
    cfg.StrOpt('nexenta_user',
               default='admin',
               help='User name to connect to Nexenta SA'),
Example #28
"""

import time
import string
import re

from cinder import exception
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import cfg
from cinder.volume import driver
from cinder.volume import volume_types
from cinder.volume import san


LOG = logging.getLogger("cinder.volume.driver")

netapp_direct_opts = [
    cfg.StrOpt('netapp_direct_volpool_name',
               default='vol0',
               help='Storage system storage pool for volumes'),
    cfg.StrOpt('netapp_direct_login',
               default=None,
               help='User name for the netapp system'),
    cfg.StrOpt('netapp_direct_igroup_name',
               default='openstack',
               help='igroup name for the netapp system'),
    cfg.StrOpt('netapp_direct_password',
               default=None,
               help='Password for the netapp system'),
    cfg.StrOpt('netapp_direct_host',
Example #29
from powervc.volume.driver import service

volume_driver_opts = [

    # Ignore delete errors so an exception is not thrown during a
    # delete.  When set to true, this allows the volume to be deleted
    # on the hosting OS even if an exception occurs. When set to false,
    # exceptions during delete prevent the volume from being deleted
    # on the hosting OS.
    cfg.BoolOpt('volume_driver_ignore_delete_error', default=False)
]

CONF = config.CONF
CONF.register_opts(volume_driver_opts, group='powervc')

LOG = cinderLogging.getLogger(__name__)


def _load_power_config(argv):
    """
    Loads the powervc config.
    """
    # Cinder is typically started with the --config-file option.
    # This prevents the default config files from loading since
    # the olso config code will only load those
    # config files as specified on the command line.
    # If the cinder is started with the
    # --config-file option then append our powervc.conf file to
    # the command line so it gets loaded as well.
    for arg in argv:
        if arg == '--config-file' or arg.startswith('--config-file='):
Example #30
from cinder.openstack.common import log as oslo_logging
from cinder.openstack.common import timeutils
from cinder import rpc
from cinder import service
from cinder.tests import conf_fixture
from cinder.tests import fake_notifier

test_opts = [
    cfg.StrOpt('sqlite_clean_db',
               default='clean.sqlite',
               help='File name of clean sqlite db'), ]

CONF = cfg.CONF
CONF.register_opts(test_opts)

LOG = oslo_logging.getLogger(__name__)

_DB_CACHE = None


class TestingException(Exception):
    pass


class Database(fixtures.Fixture):

    def __init__(self, db_session, db_migrate, sql_connection,
                 sqlite_db, sqlite_clean_db):
        self.sql_connection = sql_connection
        self.sqlite_db = sqlite_db
        self.sqlite_clean_db = sqlite_clean_db
Example #31
from cinder import flags
from cinder.openstack.common import cfg
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder.volume import san

ibm_xiv_opts = [
    cfg.StrOpt('xiv_proxy',
               default='xiv_openstack.nova_proxy.XIVNovaProxy',
               help='Proxy driver'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(ibm_xiv_opts)

LOG = logging.getLogger('nova.volume.xiv')


class XIVDriver(san.SanISCSIDriver):
    """IBM XIV volume driver."""

    def __init__(self, *args, **kwargs):
        """Initialize the driver."""

        proxy = importutils.import_class(FLAGS.xiv_proxy)

        self.xiv_proxy = proxy({
                "xiv_user": FLAGS.san_login,
                "xiv_pass": FLAGS.san_password,
                "xiv_address": FLAGS.san_ip,
                "xiv_vol_pool": FLAGS.san_clustername
Example #32
from barbicanclient import client as barbican_client
from keystoneclient.auth import identity
from keystoneclient import session
from oslo.config import cfg

from cinder import exception
from cinder.i18n import _
from cinder.keymgr import key as keymgr_key
from cinder.keymgr import key_mgr
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging

CONF = cfg.CONF
CONF.import_opt('encryption_auth_url', 'cinder.keymgr.key_mgr', group='keymgr')
CONF.import_opt('encryption_api_url', 'cinder.keymgr.key_mgr', group='keymgr')
LOG = logging.getLogger(__name__)


class BarbicanKeyManager(key_mgr.KeyManager):
    """Key Manager Interface that wraps the Barbican client API."""

    def __init__(self):
        self._base_url = CONF.keymgr.encryption_api_url
        # the barbican endpoint can't have the '/v1' on the end
        self._barbican_endpoint = self._base_url.rpartition('/')[0]
        self._barbican_client = None

    def _get_barbican_client(self, ctxt):
        """Creates a client to connect to the Barbican service.

        :param ctxt: the user context for authentication
Example #33
:mod:`nexenta.volume` -- Driver to store volumes on Nexenta Appliance
=====================================================================

.. automodule:: nexenta.volume
.. moduleauthor:: Yuriy Taraday <*****@*****.**>
"""

from cinder import exception
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import cfg
from cinder.volume import driver
from cinder.volume import nexenta
from cinder.volume.nexenta import jsonrpc

LOG = logging.getLogger("cinder.volume.nexenta.volume")
FLAGS = flags.FLAGS

nexenta_opts = [
    cfg.StrOpt('nexenta_host', default='', help='IP address of Nexenta SA'),
    cfg.IntOpt('nexenta_rest_port',
               default=2000,
               help='HTTP port to connect to Nexenta REST API server'),
    cfg.StrOpt('nexenta_rest_protocol',
               default='auto',
               help='Use http or https for REST connection (default auto)'),
    cfg.StrOpt('nexenta_user',
               default='admin',
               help='User name to connect to Nexenta SA'),
    cfg.StrOpt('nexenta_password',
               default='nexenta',