Example #1
class CiscoCfgAgent(manager.Manager):
    """Cisco Cfg Agent.

    This class defines a generic configuration agent for Cisco devices which
    implement network services in the cloud backend. It is based on the
    (reference) l3-agent, but has been enhanced to support multiple services
    in addition to routing.

    The agent acts as a container for services and does not do any
    service specific processing or configuration itself.
    All service specific processing is delegated to service helpers which
    the agent loads. Thus routing specific updates are processed by the
    routing service helper, firewall updates by the firewall helper, and so on.
    A further layer of abstraction is implemented by using device drivers for
    encapsulating all configuration operations of a service on a device.
    Device drivers are specific to a particular device/service VM, e.g. CSR1kv.

    The main entry points in this class are the `process_services()` and
    `_backlog_task()` methods.
    """
    RPC_API_VERSION = '1.1'

    OPTS = [
        cfg.IntOpt('rpc_loop_interval',
                   default=10,
                   help=_("Interval in seconds at which the "
                          "process_services() loop executes. This is when the "
                          "config agent lets each service helper process its "
                          "neutron resources.")),
        cfg.StrOpt('routing_svc_helper_class',
                   default='neutron.plugins.cisco.cfg_agent.service_helpers'
                   '.routing_svc_helper.RoutingServiceHelper',
                   help=_("Path of the routing service helper class.")),
    ]

    def __init__(self, host, conf=None):
        self.conf = conf or cfg.CONF
        self._dev_status = device_status.DeviceStatus()
        self.context = n_context.get_admin_context_without_session()

        self._initialize_rpc(host)
        self._initialize_service_helpers(host)
        self._start_periodic_tasks()
        super(CiscoCfgAgent, self).__init__(host=self.conf.host)

    def _initialize_rpc(self, host):
        self.devmgr_rpc = CiscoDeviceManagementApi(topics.L3PLUGIN, host)

    def _initialize_service_helpers(self, host):
        svc_helper_class = self.conf.routing_svc_helper_class
        try:
            self.routing_service_helper = importutils.import_object(
                svc_helper_class, host, self.conf, self)
        except ImportError as e:
            LOG.warn(
                _("Error in loading routing service helper. Class "
                  "specified is %(class)s. Reason:%(reason)s"), {
                      'class': self.conf.routing_svc_helper_class,
                      'reason': e
                  })
            self.routing_service_helper = None

    def _start_periodic_tasks(self):
        self.loop = loopingcall.FixedIntervalLoopingCall(self.process_services)
        self.loop.start(interval=self.conf.rpc_loop_interval)

    def after_start(self):
        LOG.info(_("Cisco cfg agent started"))

    def get_routing_service_helper(self):
        return self.routing_service_helper

    ## Periodic tasks ##
    @periodic_task.periodic_task
    def _backlog_task(self, context):
        """Process backlogged devices."""
        LOG.debug("Processing backlog.")
        self._process_backlogged_hosting_devices(context)

    ## Main orchestrator ##
    @lockutils.synchronized('cisco-cfg-agent', 'neutron-')
    def process_services(self, device_ids=None, removed_devices_info=None):
        """Process services managed by this config agent.

        This method is invoked in one of three scenarios.

        1. Invoked by a periodic task running every `RPC_LOOP_INTERVAL`
        seconds. This is the most common scenario.
        In this mode, the method is called without any arguments.

        2. Called by the `_process_backlogged_hosting_devices()` as part of
        the backlog processing task. In this mode, a list of device_ids
        is passed as an argument. These are the backlogged hosting devices
        that are now reachable and whose services we want to sync.

        3. Called by the `hosting_devices_removed()` method. This is when
        the config agent has received a notification from the plugin that
        some hosting devices are going to be removed. The payload contains
        the details of the hosting devices and the associated neutron
        resources on them which should be processed and removed.

        To avoid race conditions with these scenarios, this function is
        protected by a lock.

        This method goes on to invoke `process_service()` on the
        different service helpers.

        :param device_ids: List of devices that are now available and need
         to be processed
        :param removed_devices_info: Info about the hosting devices which
        are going to be removed and details of the resources hosted on them.
        Expected Format:
                {
                 'hosting_data': {'hd_id1': {'routers': [id1, id2, ...]},
                                  'hd_id2': {'routers': [id3, id4, ...]}, ...},
                 'deconfigure': True/False
                }
        :return: None
        """
        LOG.debug("Processing services started")
        # Now we process only routing service, additional services will be
        # added in future
        if self.routing_service_helper:
            self.routing_service_helper.process_service(
                device_ids, removed_devices_info)
        else:
            LOG.warn(_("No routing service helper loaded"))
        LOG.debug("Processing services completed")

    def _process_backlogged_hosting_devices(self, context):
        """Process currently backlogged devices.

        Go through the currently backlogged devices and process them.
        For devices which are now reachable (compared to last time), we call
        `process_services()` passing the now reachable device's id.
        For devices which have passed the `hosting_device_dead_timeout` and
        are hence presumed dead, execute an RPC to the plugin to report that.
        :param context: RPC context
        :return: None
        """
        res = self._dev_status.check_backlogged_hosting_devices()
        if res['reachable']:
            self.process_services(device_ids=res['reachable'])
        if res['dead']:
            LOG.debug("Reporting dead hosting devices: %s", res['dead'])
            self.devmgr_rpc.report_dead_hosting_devices(context,
                                                        hd_ids=res['dead'])

    def hosting_devices_removed(self, context, payload):
        """Deal with hosting device removed RPC message."""
        try:
            if payload['hosting_data']:
                if payload['hosting_data'].keys():
                    self.process_services(removed_devices_info=payload)
        except KeyError as e:
            LOG.error(
                _("Invalid payload format for received RPC message "
                  "`hosting_devices_removed`. Error is %(error)s. "
                  "Payload is %(payload)s"), {
                      'error': e,
                      'payload': payload
                  })
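The docstrings above describe both the architecture of the agent and the exact format of the `removed_devices_info` payload accepted by `process_services()`. The following is a minimal sketch, not part of the original module and assuming its imports are available, of registering the agent's OPTS and driving that call; the host name, hosting-device id and router ids are placeholders.

from oslo.config import cfg

# Make the agent's own options (rpc_loop_interval, routing_svc_helper_class)
# available on the global config object; raises DuplicateOptError if they
# were already registered elsewhere.
cfg.CONF.register_opts(CiscoCfgAgent.OPTS)

# Instantiated directly only for illustration; in a deployment the agent is
# launched by the agent framework.
agent = CiscoCfgAgent(host='cfg-agent-host')

# Payload in the format documented by process_services(); ids are placeholders.
removed_devices_info = {
    'hosting_data': {
        'hd_id1': {'routers': ['router-id-1', 'router-id-2']},
    },
    'deconfigure': True,
}
agent.process_services(removed_devices_info=removed_devices_info)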
Example #2
from cinder.volume import volume_types

LOG = logging.getLogger(__name__)

sf_opts = [
    cfg.BoolOpt('sf_emulate_512',
                default=True,
                help='Set 512 byte emulation on volume creation; '),
    cfg.BoolOpt('sf_allow_tenant_qos',
                default=False,
                help='Allow tenants to specify QOS on create'),
    cfg.StrOpt('sf_account_prefix',
               default=socket.gethostname(),
               help='Create SolidFire accounts with this prefix'),
    cfg.IntOpt('sf_api_port',
               default=443,
               help='SolidFire API port. Useful if the device api is behind '
               'a proxy on a different port.'),
]

CONF = cfg.CONF
CONF.register_opts(sf_opts)


class SolidFireDriver(SanISCSIDriver):
    """OpenStack driver to enable SolidFire cluster.

    Version history:
        1.0 - Initial driver
        1.1 - Refactor, clone support, qos by type and minor bug fixes

    """
Example #3
# OpenStack imports.
from oslo.config import cfg

# Calico imports.
from calico.datamodel_v1 import (READY_KEY, CONFIG_DIR, TAGS_KEY_RE, HOST_DIR,
                                 key_for_endpoint, PROFILE_DIR,
                                 key_for_profile, key_for_profile_rules,
                                 key_for_profile_tags, key_for_config)
from calico.openstack.transport import CalicoTransport

# Register Calico-specific options.
calico_opts = [
    cfg.StrOpt('etcd_host', default='localhost',
               help="The hostname or IP of the etcd node/proxy"),
    cfg.IntOpt('etcd_port', default=4001,
               help="The port to use for the etcd node/proxy"),
]
cfg.CONF.register_opts(calico_opts, 'calico')

LOG = None
OPENSTACK_ENDPOINT_RE = re.compile(
    r'^' + HOST_DIR +
    r'/(?P<hostname>[^/]+)/.*openstack.*/endpoint/(?P<endpoint_id>[^/]+)')

json_decoder = json.JSONDecoder()

PERIODIC_RESYNC_INTERVAL_SECS = 30


class CalicoTransportEtcd(CalicoTransport):
    """Calico transport implementation based on etcd."""
Example #4
import glance.store
import glance.store.base
import glance.store.location

LOG = logging.getLogger(__name__)

DEFAULT_ADDR = '127.0.0.1'
DEFAULT_PORT = 7000
DEFAULT_CHUNKSIZE = 64  # in MiB


sheepdog_opts = [
    cfg.IntOpt('sheepdog_store_chunk_size',
               default=DEFAULT_CHUNKSIZE,
               help=_('Images will be chunked into objects of this size '
                      '(in megabytes). For best performance, this should be '
                      'a power of two.')),
    cfg.IntOpt('sheepdog_store_port',
               default=DEFAULT_PORT,
               help=_('Port of sheep daemon.')),
    cfg.StrOpt('sheepdog_store_address',
               default=DEFAULT_ADDR,
               help=_('IP address of sheep daemon.'))
]

CONF = cfg.CONF
CONF.register_opts(sheepdog_opts)


class SheepdogImage:
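The chunk-size option above is expressed in MiB (DEFAULT_CHUNKSIZE = 64). A small sketch, assuming the options have been registered as shown, of how a store could read them back and convert the chunk size to bytes before slicing image data:

# Illustrative only; the CONF attributes come from the sheepdog_opts
# registered above.
chunk_size_bytes = CONF.sheepdog_store_chunk_size * 1024 * 1024
sheep_address = CONF.sheepdog_store_address
sheep_port = CONF.sheepdog_store_port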
Example #5
import datetime
import functools

from oslo.config import cfg

from nova.cells import rpc_driver
from nova import context
from nova.db import base
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils

cell_state_manager_opts = [
    cfg.IntOpt('db_check_interval',
               default=60,
               help='Seconds between getting fresh cell info from db.'),
]

LOG = logging.getLogger(__name__)

CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('reserve_percent', 'nova.cells.opts', group='cells')
CONF.import_opt('mute_child_interval', 'nova.cells.opts', group='cells')
#CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
CONF.register_opts(cell_state_manager_opts, group='cells')


class CellState(object):
    """Holds information for a particular cell."""
Example #6
_amqp1_opts = [
    cfg.StrOpt('server_request_prefix',
               default='exclusive',
               help="address prefix used when sending to a specific server"),
    cfg.StrOpt('broadcast_prefix',
               default='broadcast',
               help="address prefix used when broadcasting to all servers"),
    cfg.StrOpt('group_request_prefix',
               default='unicast',
               help="address prefix when sending to any server in group"),
    cfg.StrOpt('container_name',
               default=None,
               help='Name for the AMQP container'),
    cfg.IntOpt(
        'idle_timeout',
        default=0,  # disabled
        help='Timeout for inactive connections (in seconds)'),
    cfg.BoolOpt('trace',
                default=False,
                help='Debug: dump AMQP frames to stdout'),
    cfg.StrOpt('ssl_ca_file',
               default='',
               help="CA certificate PEM file for verifying server certificate"),
    cfg.StrOpt('ssl_cert_file',
               default='',
               help='Identifying certificate PEM file to present to clients'),
    cfg.StrOpt('ssl_key_file',
               default='',
               help='Private key PEM file used to sign cert_file certificate'),
    cfg.StrOpt('ssl_key_password',
               default=None,
Example #7
from cinder import db
from cinder import exception
from cinder import manager
from cinder import service
from cinder import test
from cinder import wsgi

test_service_opts = [
    cfg.StrOpt("fake_manager",
               default="cinder.tests.test_service.FakeManager",
               help="Manager for testing"),
    cfg.StrOpt("test_service_listen",
               default=None,
               help="Host to bind test service to"),
    cfg.IntOpt("test_service_listen_port",
               default=0,
               help="Port number to bind test service to"),
]

CONF = cfg.CONF
CONF.register_opts(test_service_opts)


class FakeManager(manager.Manager):
    """Fake manager for tests."""
    def __init__(self, host=None, db_driver=None, service_name=None):
        super(FakeManager, self).__init__(host=host, db_driver=db_driver)

    def test_method(self):
        return 'manager'
Example #8
File: manager.py Project: yuans/nova
from oslo.config import cfg

from nova.cells import rpcapi as cells_rpcapi
from nova.compute import rpcapi as compute_rpcapi
from nova.conductor import api as conductor_api
from nova import manager
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import memorycache


LOG = logging.getLogger(__name__)

consoleauth_opts = [
    cfg.IntOpt('console_token_ttl',
               default=600,
               help='How many seconds before deleting tokens'),
    cfg.StrOpt('consoleauth_manager',
               default='nova.consoleauth.manager.ConsoleAuthManager',
               help='Manager for console auth'),
    ]

CONF = cfg.CONF
CONF.register_opts(consoleauth_opts)
CONF.import_opt('enable', 'nova.cells.opts', group='cells')


class ConsoleAuthManager(manager.Manager):
    """Manages token based authentication."""

    RPC_API_VERSION = '1.2'
Example #9
#    License for the specific language governing permissions and limitations
#    under the License.

import gc
import pprint
import sys
import traceback

import eventlet
import eventlet.backdoor
import greenlet
from oslo.config import cfg

eventlet_backdoor_opts = [
    cfg.IntOpt('backdoor_port',
               default=None,
               help='port for eventlet backdoor to listen')
]

CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)


def _dont_use_this():
    print "Don't use this, just disconnect instead"


def _find_objects(t):
    return filter(lambda o: isinstance(o, t), gc.get_objects())

Example #10
                   help=('Control for checking for default networks')),
        cfg.StrOpt('neutron_default_tenant_id',
                   default="default",
                   help=('Default tenant id when creating neutron '
                         'networks'))
    ]
    CONF.register_opts(os_network_opts)
except cfg.DuplicateOptError:
    # NOTE(jkoelker) These options are verbatim elsewhere this is here
    #                to make sure they are registered for our use.
    pass

if CONF.enable_network_quota:
    opts = [
        cfg.IntOpt('quota_networks',
                   default=3,
                   help='Number of private networks allowed per project'),
    ]
    CONF.register_opts(opts)

QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'os-tenant-networks')


def network_dict(network):
    return {
        "id": network.get("uuid") or network.get("id"),
        "cidr": network.get("cidr"),
        "label": network.get("label")
    }
Example #11
from wormhole.common import log as logging
from wormhole import exception
from wormhole.common import utils
from wormhole.i18n import _

from oslo.config import cfg

from . import linux_net
from . import model as network_model
from . import network

import random

network_opts = [
    cfg.IntOpt('network_device_mtu',
               default=9000,
               help='DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE '
               'NETWORK. MTU setting for network interface.'),
]

CONF = cfg.CONF

CONF.register_opts(network_opts)

LOG = logging.getLogger(__name__)


class GenericVIFDriver(object):
    def plug(self, vif, instance):
        vif_type = vif['type']

        LOG.debug(
Example #12
File: __init__.py Project: yuans/nova
from oslo.config import cfg

from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import local
from nova.openstack.common import log as logging

LOG = logging.getLogger(__name__)

rpc_opts = [
    cfg.StrOpt('rpc_backend',
               default='%s.impl_kombu' % __package__,
               help="The messaging module to use, defaults to kombu."),
    cfg.IntOpt('rpc_thread_pool_size',
               default=64,
               help='Size of RPC thread pool'),
    cfg.IntOpt('rpc_conn_pool_size',
               default=30,
               help='Size of RPC connection pool'),
    cfg.IntOpt('rpc_response_timeout',
               default=60,
               help='Seconds to wait for a response from call or multicall'),
    cfg.IntOpt('rpc_cast_timeout',
               default=30,
               help='Seconds to wait before a cast expires (TTL). '
               'Only supported by impl_zmq.'),
    cfg.ListOpt('allowed_rpc_exception_modules',
                default=[
                    'nova.openstack.common.exception',
                    'nova.exception',
Example #13
LOG = logging.getLogger(__name__)

_LI = i18n._LI
_LW = i18n._LW
_LE = i18n._LE

scrubber_opts = [
    cfg.StrOpt('scrubber_datadir',
               default='/var/lib/glance/scrubber',
               help=_('Directory that the scrubber will use to track '
                      'information about what to delete. '
                      'Make sure this is set in glance-api.conf and '
                      'glance-scrubber.conf.')),
    cfg.IntOpt('scrub_time',
               default=0,
               help=_('The amount of time in seconds to delay before '
                      'performing a delete.')),
    cfg.BoolOpt('cleanup_scrubber',
                default=False,
                help=_('A boolean that determines if the scrubber should '
                       'clean up the files it uses for taking data. Only '
                       'one server in your deployment should be designated '
                       'the cleanup host.')),
    cfg.BoolOpt('delayed_delete',
                default=False,
                help=_('Turn on/off delayed delete.')),
    cfg.IntOpt('cleanup_scrubber_time',
               default=86400,
               help=_('Items must have a modified time that is older than '
                      'this value in order to be candidates for cleanup.'))
]
Example #14
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg


mgm_group = cfg.OptGroup('mgm', 'Libra Pool Manager options')

cfg.CONF.register_group(mgm_group)

cfg.CONF.register_opts(
    [
        cfg.IntOpt('az',
                   required=True,
                   help='The az the nodes and IPs will reside in (to be '
                        'passed to the API server)'),
        cfg.StrOpt('pid',
                   default='/var/run/libra/libra_mgm.pid',
                   help='PID file'),
        cfg.StrOpt('node_basename',
                   help='prepend the name of all nodes with this'),
        cfg.StrOpt('nova_auth_url',
                   required=True,
                   help='the auth URL for the Nova API'),
        cfg.StrOpt('nova_user',
                   required=True,
                   secret=True,
                   help='the username for the Nova API'),
        cfg.StrOpt('nova_pass',
                   required=True,
Example #15
               'if not set, all communications to Arista EOS'
               'will fail')),
    cfg.StrOpt('eapi_host',
               default='',
               help=_('Arista EOS IP address. This is a required field. '
                      'If not set, all communications to Arista EOS'
                      'will fail')),
    cfg.BoolOpt('use_fqdn',
                default=True,
                help=_('Defines if hostnames are sent to Arista EOS as FQDNs'
                       '("node1.domain.com") or as short names ("node1").'
                       'This is optional. If not set, a value of "True"'
                       'is assumed.')),
    cfg.IntOpt('sync_interval',
               default=180,
               help=_('Sync interval in seconds between Neutron plugin and'
                      'EOS. This interval defines how often the'
                      'synchronization is performed. This is an optional'
                      'field. If not set, a value of 180 seconds is assumed')),
    cfg.StrOpt('region_name',
               default='RegionOne',
               help=_('Defines Region Name that is assigned to this OpenStack'
                      'Controller. This is useful when multiple'
                      'OpenStack/Neutron controllers are managing the same'
                      'Arista HW clusters. Note that this name must match with'
                      'the region name registered (or known) to keystone'
                      'service. Authentication with Keystone is performed by '
                      'EOS. This is optional. If not set, a value of'
                      '"RegionOne" is assumed'))
]

cfg.CONF.register_opts(ARISTA_DRIVER_OPTS, "ml2_arista")
Example #16
LOG = logging.getLogger(__name__)

trusted_opts = [
    cfg.StrOpt('attestation_server', help='Attestation server HTTP'),
    cfg.StrOpt('attestation_server_ca_file',
               help='Attestation server Cert file for Identity verification'),
    cfg.StrOpt('attestation_port',
               default='8443',
               help='Attestation server port'),
    cfg.StrOpt('attestation_api_url',
               default='/OpenAttestationWebServices/V1.0',
               help='Attestation web API URL'),
    cfg.StrOpt('attestation_auth_blob',
               help='Attestation authorization blob - must change'),
    cfg.IntOpt('attestation_auth_timeout',
               default=60,
               help='Attestation status cache valid period length'),
]

CONF = cfg.CONF
trust_group = cfg.OptGroup(name='trusted_computing', title='Trust parameters')
CONF.register_group(trust_group)
CONF.register_opts(trusted_opts, group=trust_group)


class HTTPSClientAuthConnection(httplib.HTTPSConnection):
    """Class to make a HTTPS connection, with support for full client-based
    SSL Authentication
    """
    def __init__(self, host, port, key_file, cert_file, ca_file, timeout=None):
        httplib.HTTPSConnection.__init__(self,
Example #17
File: __init__.py Project: onodes/glance
from glance.common import utils
from glance.openstack.common import excutils
from glance.openstack.common import gettextutils
from glance.openstack.common import importutils
import glance.openstack.common.log as logging
from glance.openstack.common import units

LOG = logging.getLogger(__name__)
_LE = gettextutils._LE
_LI = gettextutils._LI
_LW = gettextutils._LW

image_cache_opts = [
    cfg.StrOpt('image_cache_driver', default='sqlite',
               help=_('The driver to use for image cache management.')),
    cfg.IntOpt('image_cache_max_size', default=10 * units.Gi,  # 10 GB
               help=_('The maximum size in bytes that the cache can use.')),
    cfg.IntOpt('image_cache_stall_time', default=86400,  # 24 hours
               help=_('The amount of time to let an image remain in the '
                      'cache without being accessed.')),
    cfg.StrOpt('image_cache_dir',
               help=_('Base directory that the Image Cache uses.')),
]

CONF = cfg.CONF
CONF.register_opts(image_cache_opts)


class ImageCache(object):

    """Provides an LRU cache for image data."""
Example #18
File: config.py Project: JoeChan/openbgp
import logging
import sys

from oslo.config import cfg

CONF = cfg.CONF

msg_process_opts = [
    cfg.BoolOpt('write_disk',
                default=True,
                help='Whether the BGP message is written to disk'),
    cfg.StrOpt('write_dir',
               default='/home/bgpmon/data/bgp/',
               help='The BGP messages storage path'),
    cfg.IntOpt('write_msg_max_size',
               default=500,
               help='The Max size of one BGP message file, the unit is MB'),
]

CONF.register_opts(msg_process_opts, group='message')

bgp_config_opts = [
    cfg.IntOpt('peer_start_interval',
               default=10,
               help='The interval to start each BGP peer'),
    cfg.ListOpt(
        'afi_safi',
        default=['ipv4'],
        help='The Global config for address family and sub address family'),
    cfg.DictOpt('running_config',
                default={},
Example #19
import sqlalchemy as sa
from sqlalchemy.orm import exc

from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import agent as ext_agent
from neutron import manager
from neutron.openstack.common.db import exception as db_exc
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import timeutils

LOG = logging.getLogger(__name__)
cfg.CONF.register_opt(
    cfg.IntOpt('agent_down_time', default=9,
               help=_("Seconds to regard the agent as down; should be at "
                      "least twice report_interval, to be sure the "
                      "agent is down for good.")))


class Agent(model_base.BASEV2, models_v2.HasId):
    """Represents agents running in neutron deployments."""

    __table_args__ = (
        sa.UniqueConstraint('agent_type', 'host',
                            name='uniq_agents0agent_type0host'),
    )

    # L3 agent, DHCP agent, OVS agent, LinuxBridge
    agent_type = sa.Column(sa.String(255), nullable=False)
    binary = sa.Column(sa.String(255), nullable=False)
    # TOPIC is a fanout exchange topic
Example #20
File: lvm.py Project: mshabdiz/cinder
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder import units
from cinder import utils
from cinder.volume import driver
from cinder.volume import utils as volutils

LOG = logging.getLogger(__name__)

volume_opts = [
    cfg.StrOpt('volume_group',
               default='cinder-volumes',
               help='Name for the VG that will contain exported volumes'),
    cfg.IntOpt('lvm_mirrors',
               default=0,
               help='If set, create lvms with multiple mirrors. Note that '
               'this requires lvm_mirrors + 2 pvs with available space'),
    cfg.StrOpt('lvm_type',
               default='default',
               help='Type of LVM volumes to deploy; (default or thin)'),
]

CONF = cfg.CONF
CONF.register_opts(volume_opts)


class LVMVolumeDriver(driver.VolumeDriver):
    """Executes commands relating to Volumes."""

    VERSION = '2.0.0'
Example #21
File: agent.py Project: zioc/neutron
class MetadataProxyHandler(object):
    OPTS = [
        cfg.StrOpt('admin_user', help=_("Admin user")),
        cfg.StrOpt('admin_password', help=_("Admin password"), secret=True),
        cfg.StrOpt('admin_tenant_name', help=_("Admin tenant name")),
        cfg.StrOpt('auth_url', help=_("Authentication URL")),
        cfg.StrOpt('auth_strategy',
                   default='keystone',
                   help=_("The type of authentication to use")),
        cfg.StrOpt('auth_region', help=_("Authentication region")),
        cfg.StrOpt('endpoint_type',
                   default='adminURL',
                   help=_("Network service endpoint type to pull from "
                          "the keystone catalog")),
        cfg.StrOpt('nova_metadata_ip',
                   default='127.0.0.1',
                   help=_("IP address used by Nova metadata server.")),
        cfg.IntOpt('nova_metadata_port',
                   default=8775,
                   help=_("TCP Port used by Nova metadata server.")),
        cfg.StrOpt('metadata_proxy_shared_secret',
                   default='',
                   help=_('Shared secret to sign instance-id request'),
                   secret=True)
    ]

    def __init__(self, conf):
        self.conf = conf
        self.auth_info = {}

    def _get_neutron_client(self):
        qclient = client.Client(
            username=self.conf.admin_user,
            password=self.conf.admin_password,
            tenant_name=self.conf.admin_tenant_name,
            auth_url=self.conf.auth_url,
            auth_strategy=self.conf.auth_strategy,
            region_name=self.conf.auth_region,
            auth_token=self.auth_info.get('auth_token'),
            endpoint_url=self.auth_info.get('endpoint_url'),
            endpoint_type=self.conf.endpoint_type)
        return qclient

    @webob.dec.wsgify(RequestClass=webob.Request)
    def __call__(self, req):
        try:
            LOG.debug(_("Request: %s"), req)

            instance_id = self._get_instance_id(req)
            if instance_id:
                return self._proxy_request(instance_id, req)
            else:
                return webob.exc.HTTPNotFound()

        except Exception:
            LOG.exception(_("Unexpected error."))
            msg = _('An unknown error has occurred. '
                    'Please try your request again.')
            return webob.exc.HTTPInternalServerError(explanation=unicode(msg))

    def _get_instance_id(self, req):
        qclient = self._get_neutron_client()

        remote_address = req.headers.get('X-Forwarded-For')
        network_id = req.headers.get('X-Neutron-Network-ID')
        router_id = req.headers.get('X-Neutron-Router-ID')

        if network_id:
            networks = [network_id]
        else:
            internal_ports = qclient.list_ports(
                device_id=router_id,
                device_owner=DEVICE_OWNER_ROUTER_INTF)['ports']

            networks = [p['network_id'] for p in internal_ports]

        ports = qclient.list_ports(network_id=networks,
                                   fixed_ips=[
                                       'ip_address=%s' % remote_address
                                   ])['ports']

        self.auth_info = qclient.get_auth_info()

        if len(ports) == 1:
            return ports[0]['device_id']

    def _proxy_request(self, instance_id, req):
        headers = {
            'X-Forwarded-For': req.headers.get('X-Forwarded-For'),
            'X-Instance-ID': instance_id,
            'X-Instance-ID-Signature': self._sign_instance_id(instance_id)
        }

        url = urlparse.urlunsplit(
            ('http', '%s:%s' %
             (self.conf.nova_metadata_ip, self.conf.nova_metadata_port),
             req.path_info, req.query_string, ''))

        h = httplib2.Http()
        resp, content = h.request(url,
                                  method=req.method,
                                  headers=headers,
                                  body=req.body)

        if resp.status == 200:
            LOG.debug(str(resp))
            return content
        elif resp.status == 403:
            msg = _(
                'The remote metadata server responded with Forbidden. This '
                'response usually occurs when shared secrets do not match.')
            LOG.warn(msg)
            return webob.exc.HTTPForbidden()
        elif resp.status == 404:
            return webob.exc.HTTPNotFound()
        elif resp.status == 409:
            return webob.exc.HTTPConflict()
        elif resp.status == 500:
            msg = _(
                'Remote metadata server experienced an internal server error.')
            LOG.warn(msg)
            return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
        else:
            raise Exception(_('Unexpected response code: %s') % resp.status)

    def _sign_instance_id(self, instance_id):
        return hmac.new(self.conf.metadata_proxy_shared_secret, instance_id,
                        hashlib.sha256).hexdigest()
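`_sign_instance_id()` signs the instance id with the secret configured by `metadata_proxy_shared_secret` and forwards it in the `X-Instance-ID-Signature` header. A hedged sketch, not from the original file, of how the receiving metadata service could verify that header by recomputing the HMAC with the same shared secret:

import hashlib
import hmac


def verify_instance_id_signature(shared_secret, instance_id, signature):
    # Recompute the signature exactly as _sign_instance_id() does above.
    expected = hmac.new(shared_secret, instance_id, hashlib.sha256).hexdigest()
    # Prefer a constant-time comparison to avoid timing side channels.
    return hmac.compare_digest(expected, signature)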
Example #22
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
from nova import paths
from nova.storage import linuxscsi
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as virtutils

LOG = logging.getLogger(__name__)

volume_opts = [
    cfg.IntOpt('num_iscsi_scan_tries',
               default=3,
               help='number of times to rescan iSCSI target to find volume'),
    cfg.StrOpt('rbd_user',
               default=None,
               help='the RADOS client name for accessing rbd volumes'),
    cfg.StrOpt('rbd_secret_uuid',
               default=None,
               help='the libvirt uuid of the secret for the rbd_user '
               'volumes'),
    cfg.StrOpt('nfs_mount_point_base',
               default=paths.state_path_def('mnt'),
               help='Dir where the nfs volume is mounted on the compute node'),
    cfg.StrOpt('nfs_mount_options',
               default=None,
               help='Mount options passed to the nfs client. See section '
               'of the nfs man page for details'),
Example #23
            deprecated_opts=[
                cfg.DeprecatedOpt('sql_connection', group='DEFAULT'),
                cfg.DeprecatedOpt('sql_connection', group='DATABASE'),
                cfg.DeprecatedOpt('connection', group='sql'),
            ]),
 cfg.StrOpt('mysql_sql_mode',
            default='TRADITIONAL',
            help='The SQL mode to be used for MySQL sessions. '
            'This option, including the default, overrides any '
            'server-set SQL mode. To use whatever SQL mode '
            'is set by the server configuration, '
            'set this to no value. Example: mysql_sql_mode='),
 cfg.IntOpt('idle_timeout',
            default=3600,
            deprecated_opts=[
                cfg.DeprecatedOpt('sql_idle_timeout', group='DEFAULT'),
                cfg.DeprecatedOpt('sql_idle_timeout', group='DATABASE'),
                cfg.DeprecatedOpt('idle_timeout', group='sql')
            ],
            help='Timeout before idle sql connections are reaped'),
 cfg.IntOpt('min_pool_size',
            default=1,
            deprecated_opts=[
                cfg.DeprecatedOpt('sql_min_pool_size', group='DEFAULT'),
                cfg.DeprecatedOpt('sql_min_pool_size', group='DATABASE')
            ],
            help='Minimum number of SQL connections to keep open in a '
            'pool'),
 cfg.IntOpt('max_pool_size',
            default=None,
            deprecated_opts=[
                cfg.DeprecatedOpt('sql_max_pool_size', group='DEFAULT'),
Example #24
from glance.version import version_info as version

paste_deploy_opts = [
    cfg.StrOpt('flavor'),
    cfg.StrOpt('config_file'),
]
common_opts = [
    cfg.BoolOpt('allow_additional_image_properties',
                default=True,
                help=_('Whether to allow users to specify image properties '
                       'beyond what the image schema provides')),
    cfg.StrOpt('data_api',
               default='glance.db.sqlalchemy.api',
               help=_('Python module path of data access API')),
    cfg.IntOpt('limit_param_default',
               default=25,
               help=_('Default value for the number of items returned by a '
                      'request if not specified explicitly in the request')),
    cfg.IntOpt('api_limit_max',
               default=1000,
               help=_('Maximum permissible number of items that could be '
                      'returned by a request')),
    cfg.BoolOpt('show_image_direct_url',
                default=False,
                help=_(
                    'Whether to include the backend image storage location '
                    'in image properties. Revealing storage location can be a '
                    'security risk, so use this setting with caution!')),
    cfg.IntOpt('image_size_cap',
               default=1099511627776,
               help=_("Maximum size of image a user can upload in bytes. "
                      "Defaults to 1099511627776 bytes (1 TB).")),
Example #25
File: driver.py Project: samalba/nova
xenapi_opts = [
    cfg.StrOpt('xenapi_connection_url',
               help='URL for connection to XenServer/Xen Cloud Platform. '
               'A special value of unix://local can be used to connect '
               'to the local unix socket.  '
               'Required if compute_driver=xenapi.XenAPIDriver'),
    cfg.StrOpt('xenapi_connection_username',
               default='root',
               help='Username for connection to XenServer/Xen Cloud Platform. '
               'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.StrOpt('xenapi_connection_password',
               help='Password for connection to XenServer/Xen Cloud Platform. '
               'Used only if compute_driver=xenapi.XenAPIDriver',
               secret=True),
    cfg.IntOpt('xenapi_connection_concurrent',
               default=5,
               help='Maximum number of concurrent XenAPI connections. '
               'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.FloatOpt('xenapi_vhd_coalesce_poll_interval',
                 default=5.0,
                 help='The interval used for polling of coalescing vhds. '
                 'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.BoolOpt('xenapi_check_host',
                default=True,
                help='Ensure compute service is running on host XenAPI '
                'connects to.'),
    cfg.IntOpt('xenapi_vhd_coalesce_max_attempts',
               default=5,
               help='Max number of times to poll for VHD to coalesce. '
               'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.StrOpt('xenapi_sr_base_path',
               default='/var/run/sr-mount',
Example #26
swiftbackup_service_opts = [
    cfg.StrOpt('backup_swift_url',
               default='http://localhost:8080/v1/AUTH_',
               help='The URL of the Swift endpoint'),
    cfg.StrOpt('backup_swift_auth',
               default='per_user',
               help='Swift authentication mechanism'),
    cfg.StrOpt('backup_swift_user', default=None, help='Swift user name'),
    cfg.StrOpt('backup_swift_key',
               default=None,
               help='Swift key for authentication'),
    cfg.StrOpt('backup_swift_container',
               default='volumebackups',
               help='The default Swift container to use'),
    cfg.IntOpt('backup_swift_object_size',
               default=52428800,
               help='The size in bytes of Swift backup objects'),
    cfg.IntOpt('backup_swift_retry_attempts',
               default=3,
               help='The number of retries to make for Swift operations'),
    cfg.IntOpt('backup_swift_retry_backoff',
               default=2,
               help='The backoff time in seconds between Swift retries'),
    cfg.StrOpt('backup_compression_algorithm',
               default='zlib',
               help='Compression algorithm (None to disable)'),
]

CONF = cfg.CONF
CONF.register_opts(swiftbackup_service_opts)
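The retry options registered above lend themselves to a simple retry loop with a fixed pause between attempts. A hedged illustration, not taken from the driver, where `do_swift_call` is a hypothetical callable wrapping a single Swift operation:

import time


def call_with_retries(do_swift_call):
    attempts = CONF.backup_swift_retry_attempts
    backoff = CONF.backup_swift_retry_backoff
    for attempt in range(attempts):
        try:
            return do_swift_call()
        except Exception:
            if attempt == attempts - 1:
                raise
            # Wait backup_swift_retry_backoff seconds before the next attempt.
            time.sleep(backoff)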
Example #27
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""Base class for all backup drivers."""

from cinder.db import base
from cinder import exception
from cinder.openstack.common import jsonutils
from cinder.openstack.common import log as logging
from oslo.config import cfg

service_opts = [
    cfg.IntOpt('backup_metadata_version',
               default=1,
               help='Backup metadata version to be used when backing up '
               'volume metadata. If this number is bumped, make sure the '
               'service doing the restore supports the new version.')
]

CONF = cfg.CONF
CONF.register_opts(service_opts)

LOG = logging.getLogger(__name__)


class BackupMetadataAPI(base.Base):

    TYPE_TAG_VOL_BASE_META = 'volume-base-metadata'
    TYPE_TAG_VOL_META = 'volume-metadata'
    TYPE_TAG_VOL_GLANCE_META = 'volume-glance-metadata'
Example #28
File: config.py Project: afliu/neutron
class AgentModes:
    AGENT = 'agent'
    # TODO(armando-migliaccio): support to be added, maybe we could add a
    # mixed mode to support no-downtime migrations?
    AGENTLESS = 'agentless'


class MetadataModes:
    DIRECT = 'access_network'
    INDIRECT = 'dhcp_host_route'


nvp_opts = [
    cfg.IntOpt('max_lp_per_bridged_ls', default=5000,
               help=_("Maximum number of ports of a logical switch on a "
                      "bridged transport zone (default 5000)")),
    cfg.IntOpt('max_lp_per_overlay_ls', default=256,
               help=_("Maximum number of ports of a logical switch on an "
                      "overlay transport zone (default 256)")),
    cfg.IntOpt('concurrent_connections', default=10,
               help=_("Maximum concurrent connections to each NVP "
                      "controller.")),
    cfg.IntOpt('nvp_gen_timeout', default=-1,
               help=_("Number of seconds a generation id should be valid for "
                      "(default -1 meaning do not time out)")),
    cfg.StrOpt('metadata_mode', default=MetadataModes.DIRECT,
               help=_("If set to access_network this enables a dedicated "
                      "connection to the metadata proxy for metadata server "
                      "access via Neutron router. If set to dhcp_host_route "
                      "this enables host route injection via the dhcp agent. "
Example #29
import pprint
import time

LOG = logging.getLogger(__name__)

cfg.CONF.register_group(
    cfg.OptGroup(name='backend:ipa', title="Configuration for IPA Backend"))

IPA_DEFAULT_PORT = 443

OPTS = [
    cfg.StrOpt('ipa-host',
               default='localhost.localdomain',
               help='IPA RPC listener host - must be FQDN'),
    cfg.IntOpt('ipa-port',
               default=IPA_DEFAULT_PORT,
               help='IPA RPC listener port'),
    cfg.StrOpt('ipa-client-keytab',
               default=None,
               help='Kerberos client keytab file'),
    cfg.StrOpt('ipa-auth-driver-class',
               default='designate.backend.impl_ipa.auth.IPAAuth',
               help='Class that implements the authentication '
               'driver for IPA'),
    cfg.StrOpt('ipa-ca-cert',
               default=None,
               help='CA certificate for use with https to IPA'),
    cfg.StrOpt('ipa-base-url',
               default='/ipa',
               help='Base URL for IPA RPC, relative to host[:port]'),
    cfg.StrOpt('ipa-json-url',
Example #30
LOG = logging.getLogger(__name__)

imagecache_opts = [
    cfg.StrOpt('image_info_filename_pattern',
               default='$instances_path/$image_cache_subdirectory_name/'
               '%(image)s.info',
               help='Allows image information files to be stored in '
               'non-standard locations'),
    cfg.BoolOpt('remove_unused_kernels',
                default=False,
                help='Should unused kernel images be removed? This is only '
                'safe to enable if all compute nodes have been updated '
                'to support this option. This will be enabled by default '
                'in future.'),
    cfg.IntOpt('remove_unused_resized_minimum_age_seconds',
               default=3600,
               help='Unused resized base images younger than this will not be '
               'removed'),
    cfg.BoolOpt('checksum_base_images',
                default=False,
                help='Write a checksum for files in _base to disk'),
    cfg.IntOpt('checksum_interval_seconds',
               default=3600,
               help='How frequently to checksum base images'),
]

CONF = cfg.CONF
CONF.register_opts(imagecache_opts, 'libvirt')
CONF.import_opt('instances_path', 'nova.compute.manager')
CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
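The pattern option above mixes two substitution mechanisms: `$instances_path` and `$image_cache_subdirectory_name` are expanded by oslo.config from the options of the same names, while `%(image)s` is left for the code to fill in at runtime. A minimal sketch, with an illustrative helper name, of resolving the pattern for a given base image:

import os


def get_info_filename(base_path):
    # The libvirt group comes from register_opts(imagecache_opts, 'libvirt').
    base_file = os.path.basename(base_path)
    return CONF.libvirt.image_info_filename_pattern % {'image': base_file}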