# Test scaffolding: registers test-suite-only configuration options and
# provides a decorator for skipping tests.
import stubout

from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import cfg
from cinder.openstack.common import timeutils
from cinder import service
from cinder import tests
from cinder.tests import fake_flags

# Options consumed only by the test suite.
test_opts = [
    cfg.StrOpt('sqlite_clean_db',
               default='clean.sqlite',
               help='File name of clean sqlite db'),
    cfg.BoolOpt('fake_tests',
                default=True,
                help='should we use everything for testing'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(test_opts)
LOG = logging.getLogger(__name__)


class skip_test(object):
    """Decorator that skips a test."""
    # TODO(tr3buchet): remember forever what comstud did here

    def __init__(self, msg):
        # msg: human-readable reason shown for the skipped test.
        self.message = msg
pool of available hardware (Default: True)
"""

from cinder import exception
from cinder import flags
from cinder.openstack.common import cfg
from cinder import utils

# Options selecting the DB backend and the templates used to generate
# volume/snapshot display names.
db_opts = [
    cfg.StrOpt('db_backend',
               default='sqlalchemy',
               help='The backend to use for db'),
    cfg.BoolOpt('enable_new_services',
                default=True,
                help='Services to be added to the available pool on create'),
    cfg.StrOpt('volume_name_template',
               default='volume-%s',
               help='Template string to be used to generate volume names'),
    cfg.StrOpt('snapshot_name_template',
               default='snapshot-%s',
               help='Template string to be used to generate snapshot names'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(db_opts)

# Lazily loads the concrete DB implementation named by the 'db_backend'
# option (only 'sqlalchemy' is mapped here).
IMPL = utils.LazyPluggable('db_backend',
                           sqlalchemy='cinder.db.sqlalchemy.api')
import functools

from cinder.db import base
from cinder import exception
from cinder import flags
from cinder.openstack.common import cfg
from cinder.image import glance
from cinder.openstack.common import log as logging
from cinder.openstack.common import rpc
from cinder.openstack.common import timeutils
import cinder.policy
from cinder import quota

# Whether volumes created from a snapshot are placed on the same host
# that holds the snapshot.
volume_host_opt = cfg.BoolOpt('snapshot_same_host',
        default=True,
        help='Create volume from snapshot at the host where snapshot resides')

FLAGS = flags.FLAGS
FLAGS.register_opt(volume_host_opt)
flags.DECLARE('storage_availability_zone', 'cinder.volume.manager')

LOG = logging.getLogger(__name__)

# Number of bytes in one GiB (1024 * 1024 * 1024).
GB = 1048576 * 1024
QUOTAS = quota.QUOTAS


def wrap_check_policy(func):
    """Check policy corresponding to the wrapped methods prior to execution

    This decorator requires the first 3 args of the wrapped function
default='group-0', help='Group name to use for creating volumes'), cfg.IntOpt('eqlx_ssh_keepalive_interval', default=1200, help='Seconds to wait before sending a keepalive packet'), cfg.IntOpt('eqlx_cli_timeout', default=30, help='Timeout for the Group Manager cli command execution'), cfg.IntOpt('eqlx_cli_max_retries', default=5, help='Maximum retry count for reconnection'), cfg.IntOpt('eqlx_cli_retries_timeout', default=30, help='Seconds to sleep before the next reconnection retry'), cfg.BoolOpt('eqlx_use_chap', default=False, help='Use CHAP authentificaion for targets?'), cfg.StrOpt('eqlx_chap_login', default='admin', help='Existing CHAP account name'), cfg.StrOpt('eqlx_chap_password', default='password', help='Password for specified CHAP account name'), cfg.BoolOpt('eqlx_verbose_ssh', default=False, help='Print SSH debugging output to stderr'), cfg.StrOpt('eqlx_pool', default='default', help='Pool in which volumes will be created') ]
Common Auth Middleware.
"""

import webob.dec
import webob.exc

from cinder import context
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import cfg
from cinder import wsgi

use_forwarded_for_opt = cfg.BoolOpt(
    'use_forwarded_for',
    default=False,
    help='Treat X-Forwarded-For as the canonical remote address. '
         'Only enable this if you have a sanitizing proxy.')

FLAGS = flags.FLAGS
FLAGS.register_opt(use_forwarded_for_opt)
LOG = logging.getLogger(__name__)


def pipeline_factory(loader, global_conf, **local_conf):
    """A paste pipeline replica that keys off of auth_strategy."""
    # Select the pipeline named after the configured auth strategy.
    pipeline = local_conf[FLAGS.auth_strategy]
    if not FLAGS.api_rate_limit:
        # Prefer the '<strategy>_nolimit' variant when rate limiting is
        # disabled; fall back to the default pipeline if it is absent.
        limit_name = FLAGS.auth_strategy + '_nolimit'
        pipeline = local_conf.get(limit_name, pipeline)
    pipeline = pipeline.split()
from lxml import etree from cinder import exception from cinder import flags from cinder.openstack.common import log as logging from cinder.openstack.common import cfg from cinder.openstack.common import jsonutils from cinder import utils import cinder.volume.driver LOG = logging.getLogger(__name__) san_opts = [ cfg.BoolOpt('san_thin_provision', default=True, help='Use thin provisioning for SAN volumes?'), cfg.StrOpt('san_ip', default='', help='IP address of SAN controller'), cfg.StrOpt('san_login', default='admin', help='Username for SAN controller'), cfg.StrOpt('san_password', default='', help='Password for SAN controller'), cfg.StrOpt('san_private_key', default='', help='Filename of private key to use for SSH authentication'), cfg.StrOpt('san_clustername', default='', help='Cluster name to use for creating volumes'), cfg.IntOpt('san_ssh_port', default=22, help='SSH port to use with SAN'), cfg.BoolOpt('san_is_local', default=False,
cfg.IntOpt('glance_port', default=9292, help='default glance port'), cfg.ListOpt('glance_api_servers', default=['$glance_host:$glance_port'], help='A list of the glance api servers available to cinder ' '([hostname|ip]:port)'), cfg.IntOpt('glance_num_retries', default=0, help='Number retries when downloading an image from glance'), cfg.StrOpt('scheduler_topic', default='cinder-scheduler', help='the topic scheduler nodes listen on'), cfg.StrOpt('volume_topic', default='cinder-volume', help='the topic volume nodes listen on'), cfg.BoolOpt('api_rate_limit', default=True, help='whether to rate limit the api'), cfg.ListOpt('osapi_volume_ext_list', default=[], help='Specify list of extensions to load when using osapi_' 'volume_extension option with cinder.api.openstack.' 'volume.contrib.select_extensions'), cfg.MultiStrOpt( 'osapi_volume_extension', default=['cinder.api.openstack.volume.contrib.standard_extensions'], help='osapi volume extension to load'), cfg.StrOpt('osapi_compute_link_prefix', default=None, help='Base URL that will be presented to users in links ' 'to the OpenStack Compute API'), cfg.IntOpt('osapi_max_limit',
default=60, help='Seconds to wait for a response from call or multicall'), cfg.IntOpt('rpc_cast_timeout', default=30, help='Seconds to wait before a cast expires (TTL). ' 'Only supported by impl_zmq.'), cfg.ListOpt('allowed_rpc_exception_modules', default=[ 'cinder.openstack.common.exception', 'nova.exception', 'cinder.exception', ], help='Modules of exceptions that are permitted to be recreated' 'upon receiving exception data from an rpc call.'), cfg.BoolOpt('fake_rabbit', default=False, help='If passed, use a fake RabbitMQ provider'), # # The following options are not registered here, but are expected to be # present. The project using this library must register these options with # the configuration so that project-specific defaults may be defined. # #cfg.StrOpt('control_exchange', # default='nova', # help='AMQP exchange to connect to if using RabbitMQ or Qpid'), ] cfg.CONF.register_opts(rpc_opts) def create_connection(new=True):
help='SSL cert file (valid only if SSL enabled)'), cfg.StrOpt('kombu_ssl_ca_certs', default='', help=('SSL certification authority file ' '(valid only if SSL enabled)')), cfg.StrOpt('rabbit_host', default='localhost', help='The RabbitMQ broker address where a single node is used'), cfg.IntOpt('rabbit_port', default=5672, help='The RabbitMQ broker port where a single node is used'), cfg.ListOpt('rabbit_hosts', default=['$rabbit_host:$rabbit_port'], help='RabbitMQ HA cluster host:port pairs'), cfg.BoolOpt('rabbit_use_ssl', default=False, help='connect over SSL for RabbitMQ'), cfg.StrOpt('rabbit_userid', default='guest', help='the RabbitMQ userid'), cfg.StrOpt('rabbit_password', default='guest', help='the RabbitMQ password'), cfg.StrOpt('rabbit_virtual_host', default='/', help='the RabbitMQ virtual host'), cfg.IntOpt('rabbit_retry_interval', default=1, help='how frequently to retry connecting with RabbitMQ'), cfg.IntOpt('rabbit_retry_backoff', default=2, help='how long to backoff for between retries when connecting ' 'to RabbitMQ'),
from cinder.openstack.common import timeutils
from cinder import quota
from cinder import utils
from cinder.volume import utils as volume_utils

LOG = logging.getLogger(__name__)

QUOTAS = quota.QUOTAS

# Options controlling which volume driver the manager loads and how
# capability reporting behaves.
volume_manager_opts = [
    cfg.StrOpt('volume_driver',
               default='cinder.volume.driver.ISCSIDriver',
               help='Driver to use for volume creation'),
    cfg.BoolOpt('use_local_volumes',
                default=True,
                help='if True, will not discover local volumes'),
    cfg.BoolOpt('volume_force_update_capabilities',
                default=False,
                help='if True will force update capabilities on each check'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(volume_manager_opts)


class VolumeManager(manager.SchedulerDependentManager):
    """Manages attachable block storage devices."""

    def __init__(self, volume_driver=None, *args, **kwargs):
        """Load the driver from the one specified in args, or from flags."""
        # Fall back to the 'volume_driver' flag when no driver is given.
        if not volume_driver:
import random
import socket
import string
import uuid

from cinder import exception
from cinder import flags
from cinder.openstack.common import cfg
from cinder.openstack.common import log as logging
from cinder.volume.san import SanISCSIDriver

LOG = logging.getLogger(__name__)

# Configuration options for the SolidFire cluster driver.
sf_opts = [
    cfg.BoolOpt('sf_emulate_512',
                default=True,
                help='Set 512 byte emulation on volume creation; '),
    cfg.StrOpt('sf_mvip',
               default='',
               help='IP address of SolidFire MVIP'),
    cfg.StrOpt('sf_login',
               default='admin',
               help='Username for SF Cluster Admin'),
    cfg.StrOpt('sf_password',
               default='',
               help='Password for SF Cluster Admin'),
    cfg.BoolOpt('sf_allow_tenant_qos',
                default=True,
                help='Allow tenants to specify QOS on create'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(sf_opts)
default='', help='SSL version to use (valid only if SSL enabled)'), cfg.StrOpt('kombu_ssl_keyfile', default='', help='SSL key file (valid only if SSL enabled)'), cfg.StrOpt('kombu_ssl_certfile', default='', help='SSL cert file (valid only if SSL enabled)'), cfg.StrOpt('kombu_ssl_ca_certs', default='', help=('SSL certification authority file ' '(valid only if SSL enabled)')), cfg.StrOpt('rabbit_host', default='localhost', help='the RabbitMQ host'), cfg.IntOpt('rabbit_port', default=5672, help='the RabbitMQ port'), cfg.BoolOpt('rabbit_use_ssl', default=False, help='connect over SSL for RabbitMQ'), cfg.StrOpt('rabbit_userid', default='guest', help='the RabbitMQ userid'), cfg.StrOpt('rabbit_password', default='guest', help='the RabbitMQ password'), cfg.StrOpt('rabbit_virtual_host', default='/', help='the RabbitMQ virtual host'), cfg.IntOpt('rabbit_retry_interval', default=1, help='how frequently to retry connecting with RabbitMQ'), cfg.IntOpt('rabbit_retry_backoff', default=2, help='how long to backoff for between retries when connecting ' 'to RabbitMQ'),
LOG = logging.getLogger("cinder.volume.driver")

# Configuration options for the NFS-backed volume driver.
volume_opts = [
    cfg.StrOpt('nfs_shares_config',
               default=None,
               help='File with the list of available nfs shares'),
    cfg.StrOpt('nfs_mount_point_base',
               default='$state_path/mnt',
               help='Base dir where nfs expected to be mounted'),
    cfg.StrOpt('nfs_disk_util',
               default='df',
               help='Use du or df for free space calculation'),
    cfg.BoolOpt('nfs_sparsed_volumes',
                default=True,
                # Separating spaces added between the concatenated string
                # fragments: the original help text rendered as
                # "no space.If set ... regular file.In such" in --help
                # output.
                help=('Create volumes as sparsed files which take no space. '
                      'If set to False volume is created as regular file. '
                      'In such case volume creation takes a lot of time.'))
]

FLAGS = flags.FLAGS
FLAGS.register_opts(volume_opts)


class NfsDriver(driver.VolumeDriver):
    """NFS based cinder driver.

    Creates file on NFS share for using it as block device on hypervisor.
    """

    def do_setup(self, context):
        """Any initialization the volume driver does while starting"""
        super(NfsDriver, self).do_setup(context)
qpid_opts = [ cfg.StrOpt('qpid_hostname', default='localhost', help='Qpid broker hostname'), cfg.StrOpt('qpid_port', default='5672', help='Qpid broker port'), cfg.StrOpt('qpid_username', default='', help='Username for qpid connection'), cfg.StrOpt('qpid_password', default='', help='Password for qpid connection'), cfg.StrOpt('qpid_sasl_mechanisms', default='', help='Space separated list of SASL mechanisms to use for auth'), cfg.BoolOpt('qpid_reconnect', default=True, help='Automatically reconnect'), cfg.IntOpt('qpid_reconnect_timeout', default=0, help='Reconnection timeout in seconds'), cfg.IntOpt('qpid_reconnect_limit', default=0, help='Max reconnections before giving up'), cfg.IntOpt('qpid_reconnect_interval_min', default=0, help='Minimum seconds between reconnection attempts'), cfg.IntOpt('qpid_reconnect_interval_max', default=0, help='Maximum seconds between reconnection attempts'), cfg.IntOpt('qpid_reconnect_interval', default=0, help='Equivalent to setting max and min to the same value'),
from cinder.volume import driver from cinder.volume import iscsi from lxml import etree LOG = logging.getLogger("cinder.volume.driver") zadara_opts = [ cfg.StrOpt('zadara_vpsa_ip', default=None, help='Management IP of Zadara VPSA'), cfg.StrOpt('zadara_vpsa_port', default=None, help='Zadara VPSA port number'), cfg.BoolOpt('zadara_vpsa_use_ssl', default=False, help='Use SSL connection'), cfg.StrOpt('zadara_user', default=None, help='User name for the VPSA'), cfg.StrOpt('zadara_password', default=None, help='Password for the VPSA'), cfg.StrOpt('zadara_vpsa_poolname', default=None, help='Name of VPSA storage pool for volumes'), cfg.StrOpt('zadara_default_cache_policy', default='write-through', help='Default cache policy for volumes'), cfg.StrOpt('zadara_default_encryption', default='NO', help='Default encryption policy for volumes'), cfg.StrOpt('zadara_default_striping_mode', default='simple', help='Default striping mode for volumes'),
default=3260, help='Nexenta target portal port'), cfg.StrOpt('nexenta_volume', default='cinder', help='pool on SA that will hold all volumes'), cfg.StrOpt('nexenta_target_prefix', default='iqn.1986-03.com.sun:02:cinder-', help='IQN prefix for iSCSI targets'), cfg.StrOpt('nexenta_target_group_prefix', default='cinder/', help='prefix for iSCSI target groups on SA'), cfg.StrOpt('nexenta_blocksize', default='', help='block size for volumes (blank=default,8KB)'), cfg.BoolOpt('nexenta_sparse', default=False, help='flag to create sparse volumes'), ] FLAGS.register_opts(nexenta_opts) class NexentaDriver(driver.ISCSIDriver): # pylint: disable=R0921 """Executes volume driver commands on Nexenta Appliance.""" def __init__(self): super(NexentaDriver, self).__init__() def do_setup(self, context): protocol = FLAGS.nexenta_rest_protocol auto = protocol == 'auto' if auto: protocol = 'http'