Example #1
File: twistd.py Project: bopopescu/cc
def _absorbFlags(self):
    twistd_flags = []
    reflect.accumulateClassList(self.__class__, 'optFlags',
                                twistd_flags)
    for flag in twistd_flags:
        key = flag[0].replace('-', '_')
        flags.DEFINE_boolean(key, None, str(flag[-1]))
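
Every snippet on this page follows the same pattern: call flags.DEFINE_boolean (or DEFINE_string, DEFINE_integer, ...) at module import time, then read the parsed value back through the global FLAGS object. A minimal, self-contained sketch of that cycle, assuming plain python-gflags semantics (which nova's flags module wraps); the 'verbose' flag and the script itself are hypothetical, not taken from the examples:

import sys

import gflags

FLAGS = gflags.FLAGS
gflags.DEFINE_boolean('verbose', False, 'Emit extra diagnostic output')

if __name__ == '__main__':
    # FLAGS(argv) consumes --verbose/--noverbose and returns leftover args.
    argv = FLAGS(sys.argv)
    if FLAGS.verbose:
        print('verbose mode enabled')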
Example #2
        def _absorbHandlers(self):
            twistd_handlers = {}
            reflect.addMethodNamesToDict(self.__class__, twistd_handlers, "opt_")

            # NOTE(termie): Much of the following is derived/copied from
            #               twisted.python.usage with the express purpose of
            #               providing compatibility
            for name in twistd_handlers.keys():
                method = getattr(self, 'opt_'+name)

                takesArg = not usage.flagFunction(method, name)
                doc = getattr(method, '__doc__', None)
                if not doc:
                    doc = 'undocumented'

                if not takesArg:
                    if name not in FLAGS:
                        flags.DEFINE_boolean(name, None, doc)
                    self._flagHandlers[name] = method
                else:
                    if name not in FLAGS:
                        flags.DEFINE_string(name, None, doc)
                    self._paramHandlers[name] = method
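
A hypothetical options class illustrating what _absorbHandlers inspects: an opt_ method that takes no argument is registered as a boolean flag handler, while one that accepts a value becomes a string parameter handler (usage.flagFunction is what makes the distinction):

class HypotheticalOptions(object):
    def opt_quiet(self):
        """Suppress the startup banner."""
        # No argument: flagFunction is true, so this becomes DEFINE_boolean.

    def opt_pidfile(self, path):
        """Write the server PID to the given file."""
        # Takes a value: registered via DEFINE_string as a param handler.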
Example #3
"""
The BaseScheduler is the base class Scheduler for creating instances
across zones. There are two expansion points to this class for:
1. Assigning Weights to hosts for requested instances
2. Filtering Hosts based on required instance capabilities
"""

from nova import flags
from nova import log as logging

from nova.scheduler import abstract_scheduler
from nova.scheduler import host_filter

FLAGS = flags.FLAGS
flags.DEFINE_boolean('spread_first', False,
                     'Use a spread-first zone scheduler strategy')
LOG = logging.getLogger('nova.scheduler.base_scheduler')


class BaseScheduler(abstract_scheduler.AbstractScheduler):
    """Base class for creating Schedulers that can work across any nova
    deployment, from simple designs to multiply-nested zones.
    """
    def filter_hosts(self, topic, request_spec, hosts=None):
        """Filter the full host list (from the ZoneManager)"""
        filter_name = request_spec.get('filter', None)
        # Make sure that the requested filter is legitimate.
        selected_filter = host_filter.choose_host_filter(filter_name)

        # TODO(sandy): We're only using InstanceType-based specs
        # currently. Later we'll need to snoop for more detailed
Example #4
import os

from nova import flags
from nova import log as logging

LOG = logging.getLogger("nova.crypto")

FLAGS = flags.FLAGS
flags.DEFINE_string('ca_file', 'cacert.pem', _('Filename of root CA'))
flags.DEFINE_string('key_file', os.path.join('private', 'cakey.pem'),
                    _('Filename of private key'))
flags.DEFINE_string('crl_file', 'crl.pem',
                    _('Filename of root Certificate Revocation List'))
flags.DEFINE_string('keys_path', '$state_path/keys',
                    _('Where we keep our keys'))
flags.DEFINE_string('ca_path', '$state_path/CA',
                    _('Where we keep our root CA'))
flags.DEFINE_boolean('use_project_ca', False,
                     _('Should we use a CA for each project?'))
flags.DEFINE_string(
    'user_cert_subject', '/C=US/ST=California/L=MountainView/O=AnsoLabs/'
    'OU=NovaDev/CN=%s-%s-%s',
    _('Subject for certificate for users, '
      '%s for project, user, timestamp'))
flags.DEFINE_string(
    'project_cert_subject', '/C=US/ST=California/L=MountainView/O=AnsoLabs/'
    'OU=NovaDev/CN=project-ca-%s-%s',
    _('Subject for certificate for projects, '
      '%s for project, timestamp'))
flags.DEFINE_string(
    'vpn_cert_subject', '/C=US/ST=California/L=MountainView/O=AnsoLabs/'
    'OU=NovaDev/CN=project-vpn-%s-%s',
    _('Subject for certificate for vpns, '
      '%s for project, timestamp'))
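
The three '%s' slots in user_cert_subject are filled with the project, user, and timestamp at signing time; a hypothetical illustration (the values are made up):

subject = FLAGS.user_cert_subject % ('myproj', 'alice', '2011-01-01T00:00:00Z')
# -> '/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev'
#    '/CN=myproj-alice-2011-01-01T00:00:00Z'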
Example #5
File: rpc.py Project: termie/pupa
from carrot import connection as carrot_connection
from carrot import messaging
from eventlet import greenpool
from eventlet import greenthread

from nova import context
from nova import exception
from nova import fakerabbit
from nova import flags
from nova import log as logging
from nova import utils

LOG = logging.getLogger('nova.rpc')

FLAGS = flags.FLAGS
flags.DEFINE_integer('rpc_thread_pool_size', 1024, 'Size of RPC thread pool')
flags.DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit')
flags.DEFINE_string('rabbit_host', 'localhost', 'rabbit host')
flags.DEFINE_integer('rabbit_port', 5672, 'rabbit port')
flags.DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
flags.DEFINE_string('rabbit_password', 'guest', 'rabbit password')
flags.DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
flags.DEFINE_integer('rabbit_retry_interval', 10,
                     'rabbit connection retry interval')
flags.DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
flags.DEFINE_string('control_exchange', 'nova',
                    'the main exchange to connect to')


class Connection(carrot_connection.BrokerConnection):
    """Connection instance object."""
    @classmethod
Example #6
from nova import context
from nova import exception
from nova import flags
from nova import log as logging
from nova import manager
from nova import utils


LOG = logging.getLogger('nova.volume.manager')
FLAGS = flags.FLAGS
flags.DEFINE_string('storage_availability_zone',
                    'nova',
                    'availability zone of this service')
flags.DEFINE_string('volume_driver', 'nova.volume.driver.ISCSIDriver',
                    'Driver to use for volume creation')
flags.DEFINE_boolean('use_local_volumes', True,
                     'if True, will not discover local volumes')


class VolumeManager(manager.SchedulerDependentManager):
    """Manages attachable block storage devices."""
    def __init__(self, volume_driver=None, *args, **kwargs):
        """Load the driver from the one specified in args, or from flags."""
        if not volume_driver:
            volume_driver = FLAGS.volume_driver
        self.driver = utils.import_object(volume_driver)
        super(VolumeManager, self).__init__(service_name='volume',
                                            *args, **kwargs)
        # NOTE(vish): Implementation specific db handling is done
        #             by the driver.
        self.driver.db = self.db
Example #7
from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
from nova import utils
from nova.scheduler import driver
from nova.scheduler import simple
from nova.vsa.api import VsaState
from nova.volume import volume_types

LOG = logging.getLogger('nova.scheduler.vsa')

FLAGS = flags.FLAGS
flags.DEFINE_integer('drive_type_approx_capacity_percent', 10,
                     'The percentage range for capacity comparison')
flags.DEFINE_integer('vsa_unique_hosts_per_alloc', 10,
                     'The number of unique hosts per storage allocation')
flags.DEFINE_boolean('vsa_select_unique_drives', True,
                     'Allow selection of same host for multiple drives')


def BYTES_TO_GB(bytes):
    return bytes >> 30


def GB_TO_BYTES(gb):
    return gb << 30
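
# Shift arithmetic note: 1 GB here is 2**30 bytes, so GB_TO_BYTES(1)
# returns 1073741824 and BYTES_TO_GB truncates toward zero, e.g.
# BYTES_TO_GB(2**30 + 123) == 1.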


class VsaScheduler(simple.SimpleScheduler):
    """Implements Scheduler for volume placement."""
    def __init__(self, *args, **kwargs):
        super(VsaScheduler, self).__init__(*args, **kwargs)
        self._notify_all_volume_hosts("startup")
Example #8
import tempfile
import time
import utils

from nova import vendor
import M2Crypto

from nova import exception
from nova import flags


FLAGS = flags.FLAGS
flags.DEFINE_string('ca_file', 'cacert.pem', 'Filename of root CA')
flags.DEFINE_string('keys_path', utils.abspath('../keys'), 'Where we keep our keys')
flags.DEFINE_string('ca_path', utils.abspath('../CA'), 'Where we keep our root CA')
flags.DEFINE_boolean('use_intermediate_ca', False, 'Should we use intermediate CAs for each project?')

def ca_path(project_id):
    if project_id:
        return "%s/INTER/%s/cacert.pem" % (FLAGS.ca_path, project_id)
    return "%s/cacert.pem" % (FLAGS.ca_path)

def fetch_ca(project_id=None, chain=True):
    if not FLAGS.use_intermediate_ca:
        project_id = None
    buffer = ""
    if project_id:
        with open(ca_path(project_id),"r") as cafile:
            buffer += cafile.read()
        if not chain:
            return buffer
Example #9
import os
import paramiko

from xml.etree import ElementTree

from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova.utils import ssh_execute
from nova.volume.driver import ISCSIDriver

LOG = logging.getLogger("nova.volume.driver")
FLAGS = flags.FLAGS
flags.DEFINE_boolean('san_thin_provision', True,
                     'Use thin provisioning for SAN volumes?')
flags.DEFINE_string('san_ip', '', 'IP address of SAN controller')
flags.DEFINE_string('san_login', 'admin', 'Username for SAN controller')
flags.DEFINE_string('san_password', '', 'Password for SAN controller')
flags.DEFINE_string('san_privatekey', '',
                    'Filename of private key to use for SSH authentication')
flags.DEFINE_string('san_clustername', '',
                    'Cluster name to use for creating volumes')
flags.DEFINE_integer('san_ssh_port', 22, 'SSH port to use with SAN')
flags.DEFINE_boolean(
    'san_is_local', False, 'Execute commands locally instead of over SSH; '
    'use if the volume service is running on the SAN device')
flags.DEFINE_string('san_zfs_volume_base', 'rpool/',
                    'The ZFS path under which to create zvols for volumes.')

Example #10
from nova import flags

FLAGS = flags.FLAGS
flags.DEFINE_integer('ldap_schema_version', 2,
                     'Current version of the LDAP schema')
flags.DEFINE_string('ldap_url', 'ldap://localhost',
                    'Point this at your ldap server')
flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password')
flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com',
                    'DN of admin user')
flags.DEFINE_string('ldap_user_id_attribute', 'uid', 'Attribute to use as id')
flags.DEFINE_string('ldap_user_name_attribute', 'cn',
                    'Attribute to use as name')
flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users')
flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com',
                    'OU for Users')
flags.DEFINE_boolean('ldap_user_modify_only', False,
                    'Modify attributes for users instead of creating/deleting')
flags.DEFINE_string('ldap_project_subtree', 'ou=Groups,dc=example,dc=com',
                    'OU for Projects')
flags.DEFINE_string('role_project_subtree', 'ou=Groups,dc=example,dc=com',
                    'OU for Roles')

# NOTE(vish): mapping with these flags is necessary because we're going
#             to tie in to an existing ldap schema
flags.DEFINE_string('ldap_cloudadmin',
    'cn=cloudadmins,ou=Groups,dc=example,dc=com', 'cn for Cloud Admins')
flags.DEFINE_string('ldap_itsec',
    'cn=itsec,ou=Groups,dc=example,dc=com', 'cn for ItSec')
flags.DEFINE_string('ldap_sysadmin',
    'cn=sysadmins,ou=Groups,dc=example,dc=com', 'cn for Sysadmins')
flags.DEFINE_string('ldap_netadmin',
    'cn=netadmins,ou=Groups,dc=example,dc=com', 'cn for NetAdmins')
Example #11
File: server.py Project: yamahata/nova
import os
import sys

from nova import flags
from nova import log as logging
from nova import version
from nova import wsgi
from nova.vnc import auth
from nova.vnc import proxy

LOG = logging.getLogger('nova.vncproxy')
FLAGS = flags.FLAGS
flags.DEFINE_string('vncproxy_wwwroot', '/var/lib/nova/noVNC/',
                    'Full path to noVNC directory')
flags.DEFINE_boolean('vnc_debug', False,
                     'Enable debugging features, like token bypassing')
flags.DEFINE_integer('vncproxy_port', 6080,
                     'Port that the VNC proxy should bind to')
flags.DEFINE_string('vncproxy_host', '0.0.0.0',
                    'Address that the VNC proxy should bind to')
flags.DEFINE_integer('vncproxy_flash_socket_policy_port', 843,
                     'Port that the socket policy listener should bind to')
flags.DEFINE_string('vncproxy_flash_socket_policy_host', '0.0.0.0',
                    'Address that the socket policy listener should bind to')
flags.DEFINE_integer('vnc_token_ttl', 300,
                     'How many seconds before deleting tokens')
flags.DEFINE_string('vncproxy_manager', 'nova.vnc.auth.VNCProxyAuthManager',
                    'Manager for vncproxy auth')


def get_wsgi_server():
Example #12
import socket

from nova import datastore
from nova import exception
from nova import flags
from nova import utils
from nova import validate

FLAGS = flags.FLAGS
flags.DEFINE_string('storage_dev', '/dev/sdb',
                    'Physical device to use for volumes')
flags.DEFINE_string('volume_group', 'nova-volumes',
                    'Name for the VG that will contain exported volumes')
flags.DEFINE_string('aoe_eth_dev', 'eth0',
                    'Which device to export the volumes on')
flags.DEFINE_string('storage_name', socket.gethostname(), 'name of this node')
flags.DEFINE_integer('shelf_id', utils.last_octet(utils.get_my_ip()),
                     'AoE shelf_id for this node')
flags.DEFINE_string('storage_availability_zone', 'nova',
                    'availability zone of this node')
flags.DEFINE_boolean('fake_storage', False,
                     'Should we make real storage volumes to attach?')

# TODO(joshua) Index of volumes by project


def get_volume(volume_id):
    """ Returns a redis-backed volume object """
    volume_class = Volume
    if FLAGS.fake_storage:
        volume_class = FakeVolume
    if datastore.Redis.instance().sismember('volumes', volume_id):
        return volume_class(volume_id=volume_id)
    raise exception.Error("Volume does not exist")


class BlockStore(object):
Example #13
"""

import os
import paramiko

from xml.etree import ElementTree

from nova import exception
from nova import flags
from nova import log as logging
from nova.utils import ssh_execute
from nova.volume.driver import ISCSIDriver

LOG = logging.getLogger("nova.volume.driver")
FLAGS = flags.FLAGS
flags.DEFINE_boolean('san_thin_provision', True,
                     'Use thin provisioning for SAN volumes?')
flags.DEFINE_string('san_ip', '', 'IP address of SAN controller')
flags.DEFINE_string('san_login', 'admin', 'Username for SAN controller')
flags.DEFINE_string('san_password', '', 'Password for SAN controller')
flags.DEFINE_string('san_privatekey', '',
                    'Filename of private key to use for SSH authentication')
flags.DEFINE_string('san_clustername', '',
                    'Cluster name to use for creating volumes')
flags.DEFINE_integer('san_ssh_port', 22, 'SSH port to use with SAN')


class SanISCSIDriver(ISCSIDriver):
    """ Base class for SAN-style storage volumes

    A SAN-style storage value is 'different' because the volume controller
    probably won't run on it, so we need to access is over SSH or another
Example #14
File: simple.py Project: rbenali/nova
from nova import exception
from nova import flags
from nova.scheduler import driver
from nova.scheduler import chance

FLAGS = flags.FLAGS
flags.DEFINE_integer("max_cores", 16,
                     "maximum number of instance cores to allow per host")
flags.DEFINE_integer("max_gigabytes", 10000,
                     "maximum number of volume gigabytes to allow per host")
flags.DEFINE_integer("max_networks", 1000,
                     "maximum number of networks to allow per host")
flags.DEFINE_string('default_schedule_zone', None,
                    "zone to use when user doesn't specify one")
flags.DEFINE_list('isolated_images', [], 'Images to run on isolated host')
flags.DEFINE_list('isolated_hosts', [], 'Host reserved for specific images')
flags.DEFINE_boolean('skip_isolated_core_check', True,
                     'Allow overcommitting vcpus on isolated hosts')


class SimpleScheduler(chance.ChanceScheduler):
    """Implements Naive Scheduler that tries to find least loaded host."""
    def _schedule_instance(self, context, instance_opts, *_args, **_kwargs):
        """Picks a host that is up and has the fewest running instances."""
        elevated = context.elevated()

        availability_zone = instance_opts.get('availability_zone')

        zone, host = FLAGS.default_schedule_zone, None
        if availability_zone:
            zone, _x, host = availability_zone.partition(':')
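            # str.partition always returns a 3-tuple, e.g.
            # 'nova:compute-3'.partition(':') -> ('nova', ':', 'compute-3');
            # with no ':' present, host is '' and zone keeps the full string.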

        if host and context.is_admin:
Example #15
File: auth.py Project: tomchill/reddwarf
"""
Common Auth Middleware.

"""

import webob.dec
import webob.exc

from nova import context
from nova import flags
from nova import wsgi

FLAGS = flags.FLAGS
flags.DEFINE_boolean(
    'use_forwarded_for', False,
    'Treat X-Forwarded-For as the canonical remote address. '
    'Only enable this if you have a sanitizing proxy.')


class InjectContext(wsgi.Middleware):
    """Add a 'nova.context' to WSGI environ."""
    def __init__(self, context, *args, **kwargs):
        self.context = context
        super(InjectContext, self).__init__(*args, **kwargs)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        req.environ['nova.context'] = self.context
        return self.application
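
A hypothetical composition showing where InjectContext sits in a WSGI stack ('application' below is a placeholder for the rest of the pipeline, not a name from this example):

wrapped = InjectContext(context.get_admin_context(), application)
# Requests handled by 'wrapped' now carry req.environ['nova.context'].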

Example #16
File: manager.py Project: xtoddx/nova
from nova import context
from nova import exception
from nova import flags
from nova import log as logging
from nova import manager
from nova import rpc
from nova import utils
from nova.volume import volume_types

LOG = logging.getLogger('nova.volume.manager')
FLAGS = flags.FLAGS
flags.DEFINE_string('storage_availability_zone', 'nova',
                    'availability zone of this service')
flags.DEFINE_string('volume_driver', 'nova.volume.driver.ISCSIDriver',
                    'Driver to use for volume creation')
flags.DEFINE_boolean('use_local_volumes', True,
                     'if True, will not discover local volumes')
flags.DEFINE_boolean('volume_force_update_capabilities', False,
                     'if True will force update capabilities on each check')


class VolumeManager(manager.SchedulerDependentManager):
    """Manages attachable block storage devices."""
    def __init__(self, volume_driver=None, *args, **kwargs):
        """Load the driver from the one specified in args, or from flags."""
        if not volume_driver:
            volume_driver = FLAGS.volume_driver
        self.driver = utils.import_object(volume_driver)
        super(VolumeManager, self).__init__(service_name='volume',
                                            *args,
                                            **kwargs)
        # NOTE(vish): Implementation specific db handling is done
Example #17
from nova import compute
from nova import flags
from nova import log as logging
from nova import volume
from nova.db import base


class VsaState:
    CREATING = 'creating'  # VSA creating (not ready yet)
    LAUNCHING = 'launching'  # Launching VCs (all BE volumes were created)
    CREATED = 'created'  # VSA fully created and ready for use
    PARTIAL = 'partial'  # Some BE drives were allocated
    FAILED = 'failed'  # Some BE storage allocations failed
    DELETING = 'deleting'  # VSA started the deletion procedure


FLAGS = flags.FLAGS
flags.DEFINE_string('vsa_ec2_access_key', None,
                    'EC2 access key used by VSA for accessing nova')
flags.DEFINE_string('vsa_ec2_user_id', None,
                    'User ID used by VSA for accessing nova')
flags.DEFINE_boolean('vsa_multi_vol_creation', True,
                     'Ask scheduler to create multiple volumes in one call')
flags.DEFINE_string('vsa_volume_type_name', 'VSA volume type',
                    'Name of volume type associated with FE VSA volumes')

LOG = logging.getLogger('nova.vsa')


class API(base.Base):
    """API for interacting with the VSA manager."""
    def __init__(self, compute_api=None, volume_api=None, **kwargs):
        self.compute_api = compute_api or compute.API()
        self.volume_api = volume_api or volume.API()
        super(API, self).__init__(**kwargs)

    def _check_volume_type_correctness(self, vol_type):
        if vol_type.get('extra_specs') is None or\
Example #18
"""Console Proxy Service."""

import functools
import socket

from nova import exception
from nova import flags
from nova import log as logging
from nova import manager
from nova import rpc
from nova import utils

FLAGS = flags.FLAGS
flags.DEFINE_string('console_driver', 'nova.console.xvp.XVPConsoleProxy',
                    'Driver to use for the console proxy')
flags.DEFINE_boolean('stub_compute', False,
                     'Stub calls to compute worker for tests')
flags.DEFINE_string('console_public_hostname', socket.gethostname(),
                    'Publicly visible name for this console host')


class ConsoleProxyManager(manager.Manager):
    """Sets up and tears down any console proxy connections.

    Needed for accessing instance consoles securely.

    """
    def __init__(self, console_driver=None, *args, **kwargs):
        if not console_driver:
            console_driver = FLAGS.console_driver
        self.driver = utils.import_object(console_driver)
        super(ConsoleProxyManager, self).__init__(*args, **kwargs)
Example #19
                  `sqlite:///var/lib/nova/nova.sqlite`.

:enable_new_services:  when adding a new service to the database, is it in the
                       pool of available hardware (Default: True)

"""

from nova import exception
from nova import flags
from nova import utils


FLAGS = flags.FLAGS
flags.DEFINE_string('db_backend', 'sqlalchemy',
                    'The backend to use for db')
flags.DEFINE_boolean('enable_new_services', True,
                     'Services to be added to the available pool on create')
flags.DEFINE_string('instance_name_template', 'instance-%08x',
                    'Template string to be used to generate instance names')
flags.DEFINE_string('volume_name_template', 'volume-%08x',
                    'Template string to be used to generate volume names')
flags.DEFINE_string('snapshot_name_template', 'snapshot-%08x',
                    'Template string to be used to generate snapshot names')
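
# '%08x' zero-pads a numeric id to eight hex digits, e.g. a hypothetical
# id of 255 gives FLAGS.instance_name_template % 255 == 'instance-000000ff'.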


IMPL = utils.LazyPluggable(FLAGS['db_backend'],
                           sqlalchemy='nova.db.sqlalchemy.api')
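# NOTE: LazyPluggable defers the backend import until the first attribute
# access on IMPL, so nova.db.sqlalchemy.api is only loaded when it is used.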


class NoMoreBlades(exception.Error):
    """No more available blades."""
    pass
Example #20
File: impl_qpid.py Project: xtoddx/nova
import eventlet
import greenlet
import qpid.messaging
import qpid.messaging.exceptions

from nova import flags
from nova.rpc import amqp as rpc_amqp
from nova.rpc.common import LOG

flags.DEFINE_string('qpid_hostname', 'localhost', 'Qpid broker hostname')
flags.DEFINE_string('qpid_port', '5672', 'Qpid broker port')
flags.DEFINE_string('qpid_username', '', 'Username for qpid connection')
flags.DEFINE_string('qpid_password', '', 'Password for qpid connection')
flags.DEFINE_string('qpid_sasl_mechanisms', '',
                    'Space separated list of SASL mechanisms to use for auth')
flags.DEFINE_boolean('qpid_reconnect', True, 'Automatically reconnect')
flags.DEFINE_integer('qpid_reconnect_timeout', 0,
                     'Reconnection timeout in seconds')
flags.DEFINE_integer('qpid_reconnect_limit', 0,
                     'Max reconnections before giving up')
flags.DEFINE_integer('qpid_reconnect_interval_min', 0,
                     'Minimum seconds between reconnection attempts')
flags.DEFINE_integer('qpid_reconnect_interval_max', 0,
                     'Maximum seconds between reconnection attempts')
flags.DEFINE_integer('qpid_reconnect_interval', 0,
                     'Equivalent to setting max and min to the same value')
flags.DEFINE_integer(
    'qpid_heartbeat', 5,
    'Seconds between heartbeats used to keep the connection alive')
flags.DEFINE_string('qpid_protocol', 'tcp',
                    "Transport to use, either 'tcp' or 'ssl'")