def test_long_vs_short_flags(self):
    """A short-named CLI opt must not clobber a long-named opt that
    merely shares its prefix, and both must resolve independently
    once both are registered.
    """
    # Phase 1: only the long-named opt exists; --duplicate_answer on
    # the command line refers to an unregistered opt and must be
    # ignored, leaving the long opt at its default.
    FLAGS.clear()
    FLAGS.register_cli_opt(
        cfg.StrOpt('duplicate_answer_long', default='val', help='desc'))
    argv = ['flags_test', '--duplicate_answer=60', 'extra_arg']
    flags.parse_args(argv, default_config_files=[])
    self.assert_('duplicate_answer' not in FLAGS)
    # BUG FIX: the original read `self.assert_(FLAGS.duplicate_answer_long,
    # 60)`, which only checks truthiness ('val' is truthy; 60 was silently
    # treated as the failure message). Assert the actual expected value.
    self.assertEqual(FLAGS.duplicate_answer_long, 'val')

    # Phase 2: register the short-named opt too; the command-line value
    # must bind to it while the long opt keeps its own default.
    FLAGS.clear()
    FLAGS.register_cli_opt(
        cfg.IntOpt('duplicate_answer', default=60, help='desc'))
    flags.parse_args(argv, default_config_files=[])
    self.assertEqual(FLAGS.duplicate_answer, 60)
    self.assertEqual(FLAGS.duplicate_answer_long, 'val')
from cinder import flags from cinder.openstack.common import cfg from cinder.openstack.common import log as logging from cinder.volume import driver # Check needed for unit testing on Unix if os.name == 'nt': import wmi LOG = logging.getLogger("cinder.volume.windows.volume") FLAGS = flags.FLAGS windows_opts = [ cfg.StrOpt('windows_iscsi_lun_path', default='C:\iSCSIVirtualDisks', help='Path to store VHD backed volumes'), ] FLAGS.register_opts(windows_opts) class WindowsDriver(driver.ISCSIDriver): """Executes volume driver commands on Windows Storage server.""" def __init__(self): super(WindowsDriver, self).__init__() def do_setup(self, context): """Setup the Windows Volume driver. Called one time by the manager after the driver is loaded.
import mox import nose.plugins.skip import stubout from cinder import flags from cinder.openstack.common import log as logging from cinder.openstack.common import cfg from cinder.openstack.common import timeutils from cinder import service from cinder import tests from cinder.tests import fake_flags test_opts = [ cfg.StrOpt('sqlite_clean_db', default='clean.sqlite', help='File name of clean sqlite db'), cfg.BoolOpt('fake_tests', default=True, help='should we use everything for testing'), ] FLAGS = flags.FLAGS FLAGS.register_opts(test_opts) LOG = logging.getLogger(__name__) class skip_test(object): """Decorator that skips a test."""
from cinder.openstack.common import importutils from cinder.openstack.common import jsonutils from cinder.openstack.common.rpc import common as rpc_common # for convenience, are not modified. pformat = pprint.pformat Timeout = eventlet.timeout.Timeout LOG = rpc_common.LOG RemoteError = rpc_common.RemoteError RPCException = rpc_common.RPCException zmq_opts = [ cfg.StrOpt('rpc_zmq_bind_address', default='*', help='ZeroMQ bind address. Should be a wildcard (*), ' 'an ethernet interface, or IP. ' 'The "host" option should point or resolve to this ' 'address.'), # The module.Class to use for matchmaking. cfg.StrOpt( 'rpc_zmq_matchmaker', default=('cinder.openstack.common.rpc.' 'matchmaker.MatchMakerLocalhost'), help='MatchMaker driver', ), # The following port is unassigned by IANA as of 2012-05-21 cfg.IntOpt('rpc_zmq_port', default=9501, help='ZeroMQ receiver listening port'),
Helper code for the iSCSI volume driver. """ import os from cinder import exception from cinder import flags from cinder.openstack.common import cfg from cinder.openstack.common import log as logging from cinder import utils LOG = logging.getLogger(__name__) iscsi_helper_opt = [ cfg.StrOpt('iscsi_helper', default='tgtadm', help='iscsi target user-land tool to use'), cfg.StrOpt('volumes_dir', default='$state_path/volumes', help='Volume configuration file storage directory'), ] FLAGS = flags.FLAGS FLAGS.register_opts(iscsi_helper_opt) class TargetAdmin(object): """iSCSI target administration. Base class for iSCSI target admin helpers. """
The MatchMaker classes should except a Topic or Fanout exchange key and return keys for direct exchanges, per (approximate) AMQP parlance. """ import contextlib import itertools import json import logging from cinder.openstack.common import cfg from cinder.openstack.common.gettextutils import _ matchmaker_opts = [ # Matchmaker ring file cfg.StrOpt('matchmaker_ringfile', default='/etc/nova/matchmaker_ring.json', help='Matchmaker ring file (JSON)'), ] CONF = cfg.CONF CONF.register_opts(matchmaker_opts) LOG = logging.getLogger(__name__) contextmanager = contextlib.contextmanager class MatchMakerException(Exception): """Signified a match could not be found.""" message = _("Match not found by MatchMaker.") class Exchange(object):
# Avishay Traeger <*****@*****.**> """ Volume driver for IBM XIV storage systems. """ from cinder import exception from cinder import flags from cinder.openstack.common import cfg from cinder.openstack.common import importutils from cinder.openstack.common import log as logging from cinder.volume import san ibm_xiv_opts = [ cfg.StrOpt('xiv_proxy', default='xiv_openstack.nova_proxy.XIVNovaProxy', help='Proxy driver'), ] FLAGS = flags.FLAGS FLAGS.register_opts(ibm_xiv_opts) LOG = logging.getLogger('cinder.volume.xiv') class XIVDriver(san.SanISCSIDriver): """IBM XIV volume driver.""" def __init__(self, *args, **kwargs): """Initialize the driver."""
import paramiko from cinder import exception from cinder import flags from cinder.openstack.common import log as logging from cinder.openstack.common import cfg from cinder.openstack.common import jsonutils from cinder import utils from cinder.volume.san import SanISCSIDriver LOG = logging.getLogger(__name__) eqlx_opts = [ cfg.StrOpt('eqlx_group_name', default='group-0', help='Group name to use for creating volumes'), cfg.IntOpt('eqlx_ssh_keepalive_interval', default=1200, help='Seconds to wait before sending a keepalive packet'), cfg.IntOpt('eqlx_cli_timeout', default=30, help='Timeout for the Group Manager cli command execution'), cfg.IntOpt('eqlx_cli_max_retries', default=5, help='Maximum retry count for reconnection'), cfg.IntOpt('eqlx_cli_retries_timeout', default=30, help='Seconds to sleep before the next reconnection retry'), cfg.BoolOpt('eqlx_use_chap', default=False,
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Policy Engine For Cinder""" from cinder.common import policy from cinder import exception from cinder import flags from cinder.openstack.common import cfg from cinder import utils policy_opts = [ cfg.StrOpt('policy_file', default='policy.json', help=_('JSON file representing policy')), cfg.StrOpt('policy_default_rule', default='default', help=_('Rule checked when requested rule is not found')), ] FLAGS = flags.FLAGS FLAGS.register_opts(policy_opts) _POLICY_PATH = None _POLICY_CACHE = {} def reset(): global _POLICY_PATH
""" from cinder import db from cinder import flags from cinder.openstack.common import log as logging from cinder.openstack.common import cfg from cinder.openstack.common import importutils from cinder.openstack.common import rpc from cinder.openstack.common import timeutils from cinder import utils LOG = logging.getLogger(__name__) scheduler_driver_opts = [ cfg.StrOpt('scheduler_host_manager', default='cinder.scheduler.host_manager.HostManager', help='The scheduler host manager class to use'), ] FLAGS = flags.FLAGS FLAGS.register_opts(scheduler_driver_opts) def cast_to_volume_host(context, host, method, update_db=True, **kwargs): """Cast request to a volume host queue""" if update_db: volume_id = kwargs.get('volume_id', None) if volume_id is not None: now = timeutils.utcnow() db.volume_update(context, volume_id, {
default=10, help='number of volumes allowed per project'), cfg.IntOpt('quota_gigabytes', default=1000, help='number of volume gigabytes allowed per project'), cfg.IntOpt('reservation_expire', default=86400, help='number of seconds until a reservation expires'), cfg.IntOpt('until_refresh', default=0, help='count of reservations until usage is refreshed'), cfg.IntOpt('max_age', default=0, help='number of seconds between subsequent usage refreshes'), cfg.StrOpt('quota_driver', default='cinder.quota.DbQuotaDriver', help='default driver to use for quota checks'), ] FLAGS = flags.FLAGS FLAGS.register_opts(quota_opts) class DbQuotaDriver(object): """ Driver to perform necessary checks to enforce quotas and obtain quota information. The default driver utilizes the local database. """ def get_by_project(self, context, project_id, resource): """Get a specific quota by project."""
""" import functools from cinder import db from cinder import flags from cinder.openstack.common import log as logging from cinder import manager from cinder.openstack.common import cfg from cinder.openstack.common import excutils from cinder.openstack.common import importutils LOG = logging.getLogger(__name__) scheduler_driver_opt = cfg.StrOpt( 'scheduler_driver', default='cinder.scheduler.simple.SimpleScheduler', help='Default driver to use for the scheduler') FLAGS = flags.FLAGS FLAGS.register_opt(scheduler_driver_opt) class SchedulerManager(manager.Manager): """Chooses a host to create volumes""" RPC_API_VERSION = '1.0' def __init__(self, scheduler_driver=None, *args, **kwargs): if not scheduler_driver: scheduler_driver = FLAGS.scheduler_driver self.driver = importutils.import_object(scheduler_driver)
import os
import errno
import ctypes

from cinder import flags
from cinder.openstack.common import cfg
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder import exception

LOG = logging.getLogger("cinder.volume.driver")

# Options controlling where NFS shares are listed/mounted and how
# volume files are laid out on them.
volume_opts = [
    cfg.StrOpt('nfs_shares_config',
               default=None,
               help='File with the list of available nfs shares'),
    cfg.StrOpt('nfs_mount_point_base',
               default='$state_path/mnt',
               help='Base dir where nfs expected to be mounted'),
    cfg.StrOpt('nfs_disk_util',
               default='df',
               help='Use du or df for free space calculation'),
    cfg.BoolOpt('nfs_sparsed_volumes',
                default=True,
                # BUG FIX: the implicitly-concatenated help fragments had no
                # separating spaces, rendering as "...no space.If set to..."
                # in --help output.
                help=('Create volumes as sparsed files which take no space. '
                      'If set to False volume is created as regular file. '
                      'In such case volume creation takes a lot of time.'))
]

FLAGS = flags.FLAGS
import eventlet import greenlet import qpid.messaging import qpid.messaging.exceptions from cinder.openstack.common import cfg from cinder.openstack.common.gettextutils import _ from cinder.openstack.common import jsonutils from cinder.openstack.common.rpc import amqp as rpc_amqp from cinder.openstack.common.rpc import common as rpc_common LOG = logging.getLogger(__name__) qpid_opts = [ cfg.StrOpt('qpid_hostname', default='localhost', help='Qpid broker hostname'), cfg.StrOpt('qpid_port', default='5672', help='Qpid broker port'), cfg.StrOpt('qpid_username', default='', help='Username for qpid connection'), cfg.StrOpt('qpid_password', default='', help='Password for qpid connection'), cfg.StrOpt('qpid_sasl_mechanisms', default='', help='Space separated list of SASL mechanisms to use for auth'), cfg.BoolOpt('qpid_reconnect', default=True, help='Automatically reconnect'), cfg.IntOpt('qpid_reconnect_timeout', default=0,
from cinder import exception from cinder import flags from cinder.openstack.common import log as logging from cinder.openstack.common import cfg from cinder import utils from cinder.volume import driver from cinder.volume import iscsi from lxml import etree LOG = logging.getLogger("cinder.volume.driver") zadara_opts = [ cfg.StrOpt('zadara_vpsa_ip', default=None, help='Management IP of Zadara VPSA'), cfg.StrOpt('zadara_vpsa_port', default=None, help='Zadara VPSA port number'), cfg.BoolOpt('zadara_vpsa_use_ssl', default=False, help='Use SSL connection'), cfg.StrOpt('zadara_user', default=None, help='User name for the VPSA'), cfg.StrOpt('zadara_password', default=None, help='Password for the VPSA'), cfg.StrOpt('zadara_vpsa_poolname', default=None, help='Name of VPSA storage pool for volumes'), cfg.StrOpt('zadara_default_cache_policy', default='write-through', help='Default cache policy for volumes'),
from cinder import exception from cinder import flags from cinder.openstack.common import log as logging from cinder.openstack.common import cfg from cinder.openstack.common import jsonutils from cinder import utils import cinder.volume.driver LOG = logging.getLogger(__name__) san_opts = [ cfg.BoolOpt('san_thin_provision', default=True, help='Use thin provisioning for SAN volumes?'), cfg.StrOpt('san_ip', default='', help='IP address of SAN controller'), cfg.StrOpt('san_login', default='admin', help='Username for SAN controller'), cfg.StrOpt('san_password', default='', help='Password for SAN controller'), cfg.StrOpt('san_private_key', default='', help='Filename of private key to use for SSH authentication'), cfg.StrOpt('san_clustername', default='', help='Cluster name to use for creating volumes'), cfg.IntOpt('san_ssh_port', default=22, help='SSH port to use with SAN'), cfg.BoolOpt('san_is_local', default=False, help='Execute commands locally instead of over SSH; ' 'use if the volume service is running on the SAN device'),
case, a Google DNS server is used, but the specific address does not matter much. No traffic is actually sent. """ try: csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) csock.connect(('8.8.8.8', 80)) (addr, port) = csock.getsockname() csock.close() return addr except socket.error: return "127.0.0.1" core_opts = [ cfg.StrOpt('connection_type', default=None, help='Virtualization api connection type : libvirt, xenapi, ' 'or fake'), cfg.StrOpt('sql_connection', default='sqlite:///$state_path/$sqlite_db', help='The SQLAlchemy connection string used to connect to the ' 'database'), cfg.IntOpt('sql_connection_debug', default=0, help='Verbosity of SQL debugging information. 0=None, ' '100=Everything'), cfg.StrOpt('api_paste_config', default="api-paste.ini", help='File name for the paste.deploy config for cinder-api'), cfg.StrOpt('pybasedir', default=os.path.abspath( os.path.join(os.path.dirname(__file__), '../')),
import os from cinder import exception from cinder import flags from cinder import utils from cinder.openstack.common import cfg from cinder.openstack.common import log as logging from cinder.volume import iscsi from cinder.volume.driver import _iscsi_location from cinder.volume.san import SanISCSIDriver LOG = logging.getLogger(__name__) san_opts = [ cfg.StrOpt('san_zfs_command', default='zfs', help='The ZFS command.'), ] FLAGS = flags.FLAGS FLAGS.register_opts(san_opts) class ZFSonLinuxISCSIDriver(SanISCSIDriver): """Executes commands relating to ZFS-on-Linux-hosted ISCSI volumes. Basic setup for a ZoL iSCSI server: XXX Note that current implementation of ZFS on Linux does not handle:
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Base class for classes that need modular database access."""

from cinder import flags
from cinder.openstack.common import cfg
from cinder.openstack.common import importutils

db_driver_opt = cfg.StrOpt('db_driver',
                           default='cinder.db',
                           help='driver to use for database access')

FLAGS = flags.FLAGS
FLAGS.register_opt(db_driver_opt)


class Base(object):
    """Mixin that injects a pluggable DB access module on construction."""

    def __init__(self, db_driver=None):
        # An explicit driver wins; any falsy value (None, '') falls back
        # to the flag-configured default, exactly like the original
        # `if not db_driver:` guard.
        self.db = importutils.import_module(db_driver or FLAGS.db_driver)


# pylint: disable=C0103
from cinder.openstack.common import cfg from cinder.openstack.common import excutils from cinder.openstack.common import importutils from cinder.openstack.common import timeutils from cinder import quota from cinder import utils from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS volume_manager_opts = [ cfg.StrOpt('volume_driver', default='cinder.volume.driver.ISCSIDriver', help='Driver to use for volume creation'), cfg.BoolOpt('use_local_volumes', default=True, help='if True, will not discover local volumes'), cfg.BoolOpt('volume_force_update_capabilities', default=False, help='if True will force update capabilities on each check'), ] FLAGS = flags.FLAGS FLAGS.register_opts(volume_manager_opts) class VolumeManager(manager.SchedulerDependentManager): """Manages attachable block storage devices."""
LOG = logging.getLogger(__name__) service_opts = [ cfg.IntOpt('report_interval', default=10, help='seconds between nodes reporting state to datastore'), cfg.IntOpt('periodic_interval', default=60, help='seconds between running periodic tasks'), cfg.IntOpt('periodic_fuzzy_delay', default=60, help='range of seconds to randomly delay when starting the' ' periodic task scheduler to reduce stampeding.' ' (Disable by setting to 0)'), cfg.StrOpt('osapi_volume_listen', default="0.0.0.0", help='IP address for OpenStack Volume API to listen'), cfg.IntOpt('osapi_volume_listen_port', default=8776, help='port for os volume api to listen'), ] FLAGS = flags.FLAGS FLAGS.register_opts(service_opts) class Launcher(object): """Launch one or more services and wait for them to complete.""" def __init__(self): """Initialize the service launcher.
import string import uuid from cinder import exception from cinder import flags from cinder.openstack.common import cfg from cinder.openstack.common import log as logging from cinder.volume.san import SanISCSIDriver LOG = logging.getLogger(__name__) sf_opts = [ cfg.BoolOpt('sf_emulate_512', default=True, help='Set 512 byte emulation on volume creation; '), cfg.StrOpt('sf_mvip', default='', help='IP address of SolidFire MVIP'), cfg.StrOpt('sf_login', default='admin', help='Username for SF Cluster Admin'), cfg.StrOpt('sf_password', default='', help='Password for SF Cluster Admin'), cfg.BoolOpt('sf_allow_tenant_qos', default=True, help='Allow tenants to specify QOS on create'), ] FLAGS = flags.FLAGS FLAGS.register_opts(sf_opts) class SolidFire(SanISCSIDriver):
import mox from cinder import context from cinder import db from cinder import exception from cinder import flags from cinder.openstack.common import cfg from cinder import test from cinder import service from cinder import manager from cinder import wsgi test_service_opts = [ cfg.StrOpt("fake_manager", default="cinder.tests.test_service.FakeManager", help="Manager for testing"), cfg.StrOpt("test_service_listen", default=None, help="Host to bind test service to"), cfg.IntOpt("test_service_listen_port", default=0, help="Port number to bind test service to"), ] flags.FLAGS.register_opts(test_service_opts) class FakeManager(manager.Manager): """Fake manager for tests""" def test_method(self):
# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder import flags from cinder.openstack.common import cfg from cinder import test FLAGS = flags.FLAGS FLAGS.register_opt( cfg.StrOpt('flags_unittest', default='foo', help='for testing purposes only')) class FlagsTestCase(test.TestCase): def setUp(self): super(FlagsTestCase, self).setUp() def test_declare(self): self.assert_('answer' not in FLAGS) flags.DECLARE('answer', 'cinder.tests.declare_flags') self.assert_('answer' in FLAGS) self.assertEqual(FLAGS.answer, 42) # Make sure we don't overwrite anything FLAGS.set_override('answer', 256)
`sqlite:///var/lib/cinder/cinder.sqlite`. :enable_new_services: when adding a new service to the database, is it in the pool of available hardware (Default: True) """ from cinder import exception from cinder import flags from cinder.openstack.common import cfg from cinder import utils db_opts = [ cfg.StrOpt('db_backend', default='sqlalchemy', help='The backend to use for db'), cfg.BoolOpt('enable_new_services', default=True, help='Services to be added to the available pool on create'), cfg.StrOpt('volume_name_template', default='volume-%s', help='Template string to be used to generate volume names'), cfg.StrOpt('snapshot_name_template', default='snapshot-%s', help='Template string to be used to generate snapshot names'), ] FLAGS = flags.FLAGS FLAGS.register_opts(db_opts)
# License for the specific language governing permissions and limitations # under the License. """ A remote procedure call (rpc) abstraction. For some wrappers that add message versioning to rpc, see: rpc.dispatcher rpc.proxy """ from cinder.openstack.common import cfg from cinder.openstack.common import importutils rpc_opts = [ cfg.StrOpt('rpc_backend', default='%s.impl_kombu' % __package__, help="The messaging module to use, defaults to kombu."), cfg.IntOpt('rpc_thread_pool_size', default=64, help='Size of RPC thread pool'), cfg.IntOpt('rpc_conn_pool_size', default=30, help='Size of RPC connection pool'), cfg.IntOpt('rpc_response_timeout', default=60, help='Seconds to wait for a response from call or multicall'), cfg.IntOpt('rpc_cast_timeout', default=30, help='Seconds to wait before a cast expires (TTL). ' 'Only supported by impl_zmq.'), cfg.ListOpt('allowed_rpc_exception_modules',
import tempfile import time import urllib from cinder import exception from cinder import flags from cinder.openstack.common import log as logging from cinder.openstack.common import cfg from cinder import utils from cinder.volume import iscsi LOG = logging.getLogger(__name__) volume_opts = [ cfg.StrOpt('volume_group', default='cinder-volumes', help='Name for the VG that will contain exported volumes'), cfg.IntOpt('num_shell_tries', default=3, help='number of times to attempt to run flakey shell commands'), cfg.IntOpt('num_iscsi_scan_tries', default=3, help='number of times to rescan iSCSI target to find volume'), cfg.IntOpt('iscsi_num_targets', default=100, help='Number of iscsi target ids per host'), cfg.StrOpt('iscsi_target_prefix', default='iqn.2010-10.org.openstack:', help='prefix for iscsi volumes'), cfg.StrOpt('iscsi_ip_address', default='$my_ip',
import eventlet import greenlet import kombu import kombu.connection import kombu.entity import kombu.messaging from cinder.openstack.common import cfg from cinder.openstack.common.gettextutils import _ from cinder.openstack.common.rpc import amqp as rpc_amqp from cinder.openstack.common.rpc import common as rpc_common from cinder.openstack.common import network_utils kombu_opts = [ cfg.StrOpt('kombu_ssl_version', default='', help='SSL version to use (valid only if SSL enabled)'), cfg.StrOpt('kombu_ssl_keyfile', default='', help='SSL key file (valid only if SSL enabled)'), cfg.StrOpt('kombu_ssl_certfile', default='', help='SSL cert file (valid only if SSL enabled)'), cfg.StrOpt('kombu_ssl_ca_certs', default='', help=('SSL certification authority file ' '(valid only if SSL enabled)')), cfg.StrOpt('rabbit_host', default='localhost', help='The RabbitMQ broker address where a single node is used'), cfg.IntOpt('rabbit_port',
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import importutils
from cinder.openstack.common import jsonutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils

LOG = logging.getLogger(__name__)

# Configuration of the notification subsystem: which driver(s) deliver
# notifications, and the defaults applied to outgoing messages.
notifier_opts = [
    cfg.MultiStrOpt('notification_driver',
                    default=[],
                    deprecated_name='list_notifier_drivers',
                    help='Driver or drivers to handle sending notifications'),
    cfg.StrOpt('default_notification_level',
               default='INFO',
               help='Default notification level for outgoing notifications'),
    cfg.StrOpt('default_publisher_id',
               default='$host',
               help='Default publisher_id for outgoing notifications'),
]

CONF = cfg.CONF
CONF.register_opts(notifier_opts)

# Recognized notification priority levels.
WARN = 'WARN'
INFO = 'INFO'
ERROR = 'ERROR'
CRITICAL = 'CRITICAL'
DEBUG = 'DEBUG'
.. moduleauthor:: Yuriy Taraday <*****@*****.**> """ from cinder import exception from cinder import flags from cinder.openstack.common import log as logging from cinder.openstack.common import cfg from cinder.volume import driver from cinder.volume import nexenta from cinder.volume.nexenta import jsonrpc LOG = logging.getLogger("cinder.volume.nexenta.volume") FLAGS = flags.FLAGS nexenta_opts = [ cfg.StrOpt('nexenta_host', default='', help='IP address of Nexenta SA'), cfg.IntOpt('nexenta_rest_port', default=2000, help='HTTP port to connect to Nexenta REST API server'), cfg.StrOpt('nexenta_rest_protocol', default='auto', help='Use http or https for REST connection (default auto)'), cfg.StrOpt('nexenta_user', default='admin', help='User name to connect to Nexenta SA'), cfg.StrOpt('nexenta_password', default='nexenta', help='Password to connect to Nexenta SA'), cfg.IntOpt('nexenta_iscsi_target_portal_port', default=3260, help='Nexenta target portal port'),