from engine import rpc
from engine import utils
from engine import exception
from engine.scheduler import driver
from engine.scheduler import simple
from engine.vsa.api import VsaState
from engine.volume import volume_types

# NOTE(review): `logging` and `flags` are referenced below but not imported in
# this chunk -- presumably imported earlier in the file; confirm.
LOG = logging.getLogger('engine.scheduler.vsa')

FLAGS = flags.FLAGS
flags.DEFINE_integer('drive_type_approx_capacity_percent', 10,
                     'The percentage range for capacity comparison')
flags.DEFINE_integer('vsa_unique_hosts_per_alloc', 10,
                     'The number of unique hosts per storage allocation')
flags.DEFINE_boolean('vsa_select_unique_drives', True,
                     'Allow selection of same host for multiple drives')


def BYTES_TO_GB(bytes):
    """Convert a byte count to whole gigabytes (floor, via right shift)."""
    return bytes >> 30


def GB_TO_BYTES(gb):
    """Convert a gigabyte count to bytes (left shift by 30)."""
    return gb << 30


class VsaScheduler(simple.SimpleScheduler):
    """Implements Scheduler for volume placement."""

    def __init__(self, *args, **kwargs):
        super(VsaScheduler, self).__init__(*args, **kwargs)
        # Presumably announces scheduler startup to all volume hosts so they
        # report capabilities back -- helper defined elsewhere; confirm.
        self._notify_all_volume_hosts("startup")
from engine import exception
from engine.scheduler import driver
from engine.scheduler import chance

# NOTE(review): `flags` is referenced below but not imported in this chunk --
# presumably imported earlier in the file; confirm.
FLAGS = flags.FLAGS
flags.DEFINE_integer("max_cores", 16,
                     "maximum number of instance cores to allow per host")
flags.DEFINE_integer("max_gigabytes", 10000,
                     "maximum number of volume gigabytes to allow per host")
flags.DEFINE_integer("max_networks", 1000,
                     "maximum number of networks to allow per host")
flags.DEFINE_string('default_schedule_zone', None,
                    'zone to use when user doesnt specify one')
flags.DEFINE_list('isolated_images', [],
                  'Images to run on isolated host')
flags.DEFINE_list('isolated_hosts', [],
                  'Host reserved for specific images')
flags.DEFINE_boolean('skip_isolated_core_check', True,
                     'Allow overcommitting vcpus on isolated hosts')


class SimpleScheduler(chance.ChanceScheduler):
    """Implements Naive Scheduler that tries to find least loaded host."""

    def _schedule_instance(self, context, instance_opts, *_args, **_kwargs):
        """Picks a host that is up and has the fewest running instances."""
        elevated = context.elevated()

        availability_zone = instance_opts.get('availability_zone')

        # Default the zone from the flag; host stays unset unless the caller
        # used the 'zone:host' pin syntax below.
        zone, host = FLAGS.default_schedule_zone, None
        if availability_zone:
            zone, _x, host = availability_zone.partition(':')

        # NOTE(review): method truncated in this chunk -- body continues past
        # the visible text.
        if host and context.is_admin:
import os
import sys

from engine import flags
from engine import log as logging
from engine import version
from engine import wsgi
from engine.vnc import auth
from engine.vnc import proxy

LOG = logging.getLogger('engine.vncproxy')

FLAGS = flags.FLAGS
flags.DEFINE_string('vncproxy_wwwroot', '/var/lib/engine/noVNC/',
                    'Full path to noVNC directory')
flags.DEFINE_boolean('vnc_debug', False,
                     'Enable debugging features, like token bypassing')
flags.DEFINE_integer('vncproxy_port', 6080,
                     'Port that the VNC proxy should bind to')
flags.DEFINE_string('vncproxy_host', '0.0.0.0',
                    'Address that the VNC proxy should bind to')
flags.DEFINE_integer('vncproxy_flash_socket_policy_port', 843,
                     'Port that the socket policy listener should bind to')
flags.DEFINE_string('vncproxy_flash_socket_policy_host', '0.0.0.0',
                    'Address that the socket policy listener should bind to')
flags.DEFINE_integer('vnc_token_ttl', 300,
                     'How many seconds before deleting tokens')
flags.DEFINE_string('vncproxy_manager', 'engine.vnc.auth.VNCProxyAuthManager',
                    'Manager for vncproxy auth')


# NOTE(review): definition truncated in this chunk -- body not visible.
def get_wsgi_server():
# NOTE(review): `flags` is referenced below but not imported in this chunk --
# presumably imported earlier in the file; confirm.
FLAGS = flags.FLAGS
flags.DEFINE_integer('ldap_schema_version', 2,
                     'Current version of the LDAP schema')
flags.DEFINE_string('ldap_url', 'ldap://localhost',
                    'Point this at your ldap server')
flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password')
flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com',
                    'DN of admin user')
flags.DEFINE_string('ldap_user_id_attribute', 'uid', 'Attribute to use as id')
flags.DEFINE_string('ldap_user_name_attribute', 'cn',
                    'Attribute to use as name')
flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users')
flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com',
                    'OU for Users')
flags.DEFINE_boolean('ldap_user_modify_only', False,
                     'Modify attributes for users instead of creating/deleting')
flags.DEFINE_string('ldap_project_subtree', 'ou=Groups,dc=example,dc=com',
                    'OU for Projects')
flags.DEFINE_string('role_project_subtree', 'ou=Groups,dc=example,dc=com',
                    'OU for Roles')

# NOTE(vish): mapping with these flags is necessary because we're going
# to tie in to an existing ldap schema
flags.DEFINE_string('ldap_cloudadmin',
                    'cn=cloudadmins,ou=Groups,dc=example,dc=com',
                    'cn for Cloud Admins')
flags.DEFINE_string('ldap_itsec',
                    'cn=itsec,ou=Groups,dc=example,dc=com',
                    'cn for ItSec')
flags.DEFINE_string('ldap_sysadmin',
                    'cn=sysadmins,ou=Groups,dc=example,dc=com',
                    'cn for Sysadmins')
flags.DEFINE_string('ldap_netadmin',
                    'cn=netadmins,ou=Groups,dc=example,dc=com',
                    'cn for NetAdmins')
"""Console Proxy Service.""" import functools import socket from engine import exception from engine import flags from engine import log as logging from engine import manager from engine import rpc from engine import utils FLAGS = flags.FLAGS flags.DEFINE_string('console_driver', 'engine.console.xvp.XVPConsoleProxy', 'Driver to use for the console proxy') flags.DEFINE_boolean('stub_compute', False, 'Stub calls to compute worker for tests') flags.DEFINE_string('console_public_hostname', socket.gethostname(), 'Publicly visable name for this console host') class ConsoleProxyManager(manager.Manager): """Sets up and tears down any console proxy connections. Needed for accessing instance consoles securely. """ def __init__(self, console_driver=None, *args, **kwargs): if not console_driver: console_driver = FLAGS.console_driver self.driver = utils.import_object(console_driver) super(ConsoleProxyManager, self).__init__(*args, **kwargs)
from engine import flags
from engine import log as logging

LOG = logging.getLogger("engine.crypto")

# NOTE(review): `os` and the `_` translation helper are used below but are not
# imported in this chunk -- presumably brought into scope earlier; confirm.
FLAGS = flags.FLAGS
flags.DEFINE_string('ca_file', 'cacert.pem',
                    _('Filename of root CA'))
flags.DEFINE_string('key_file',
                    os.path.join('private', 'cakey.pem'),
                    _('Filename of private key'))
flags.DEFINE_string('crl_file', 'crl.pem',
                    _('Filename of root Certificate Revocation List'))
flags.DEFINE_string('keys_path', '$state_path/keys',
                    _('Where we keep our keys'))
flags.DEFINE_string('ca_path', '$state_path/CA',
                    _('Where we keep our root CA'))
flags.DEFINE_boolean('use_project_ca', False,
                     _('Should we use a CA for each project?'))
flags.DEFINE_string(
    'user_cert_subject',
    '/C=US/ST=California/L=MountainView/O=AnsoLabs/'
    'OU=EngineDev/CN=%s-%s-%s',
    _('Subject for certificate for users, '
      '%s for project, user, timestamp'))
flags.DEFINE_string(
    'project_cert_subject',
    '/C=US/ST=California/L=MountainView/O=AnsoLabs/'
    'OU=EngineDev/CN=project-ca-%s-%s',
    _('Subject for certificate for projects, '
      '%s for project, timestamp'))
flags.DEFINE_string(
    'vpn_cert_subject',
    '/C=US/ST=California/L=MountainView/O=AnsoLabs/'
    'OU=EngineDev/CN=project-vpn-%s-%s',
    _('Subject for certificate for vpns, '
      '%s for project, timestamp'))
class VsaState: CREATING = 'creating' # VSA creating (not ready yet) LAUNCHING = 'launching' # Launching VCs (all BE volumes were created) CREATED = 'created' # VSA fully created and ready for use PARTIAL = 'partial' # Some BE drives were allocated FAILED = 'failed' # Some BE storage allocations failed DELETING = 'deleting' # VSA started the deletion procedure FLAGS = flags.FLAGS flags.DEFINE_string('vsa_ec2_access_key', None, 'EC2 access key used by VSA for accessing engine') flags.DEFINE_string('vsa_ec2_user_id', None, 'User ID used by VSA for accessing engine') flags.DEFINE_boolean('vsa_multi_vol_creation', True, 'Ask scheduler to create multiple volumes in one call') flags.DEFINE_string('vsa_volume_type_name', 'VSA volume type', 'Name of volume type associated with FE VSA volumes') LOG = logging.getLogger('engine.vsa') class API(base.Base): """API for interacting with the VSA manager.""" def __init__(self, compute_api=None, volume_api=None, **kwargs): self.compute_api = compute_api or compute.API() self.volume_api = volume_api or volume.API() super(API, self).__init__(**kwargs) def _check_volume_type_correctness(self, vol_type): if vol_type.get('extra_specs') is None or\
# License for the specific language governing permissions and limitations # under the License. """ Common Auth Middleware. """ import webob.dec import webob.exc from engine import flags from engine import wsgi FLAGS = flags.FLAGS flags.DEFINE_boolean( 'use_forwarded_for', False, 'Treat X-Forwarded-For as the canonical remote address. ' 'Only enable this if you have a sanitizing proxy.') class InjectContext(wsgi.Middleware): """Add a 'engine.context' to WSGI environ.""" def __init__(self, context, *args, **kwargs): self.context = context super(InjectContext, self).__init__(*args, **kwargs) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): req.environ['engine.context'] = self.context return self.application
import os

import paramiko
from xml.etree import ElementTree

from engine import exception
from engine import flags
from engine import log as logging
from engine import utils
from engine.utils import ssh_execute
from engine.volume.driver import ISCSIDriver

LOG = logging.getLogger("engine.volume.driver")

FLAGS = flags.FLAGS
# FIX(review): these boolean flags were declared with *string* defaults
# ('true' / 'false'). Every other DEFINE_boolean in this codebase passes a
# real bool, and a string default is hazardous: any non-empty string --
# including 'false' -- is truthy if the value is ever used directly. Use
# genuine booleans with the same intended defaults.
flags.DEFINE_boolean('san_thin_provision', True,
                     'Use thin provisioning for SAN volumes?')
flags.DEFINE_string('san_ip', '',
                    'IP address of SAN controller')
flags.DEFINE_string('san_login', 'admin',
                    'Username for SAN controller')
flags.DEFINE_string('san_password', '',
                    'Password for SAN controller')
flags.DEFINE_string('san_privatekey', '',
                    'Filename of private key to use for SSH authentication')
flags.DEFINE_string('san_clustername', '',
                    'Cluster name to use for creating volumes')
flags.DEFINE_integer('san_ssh_port', 22,
                     'SSH port to use with SAN')
flags.DEFINE_boolean(
    'san_is_local', False,
    'Execute commands locally instead of over SSH; '
    'use if the volume service is running on the SAN device')
flags.DEFINE_string('san_zfs_volume_base', 'rpool/',
                    'The ZFS path under which to create zvols for volumes.')
from engine import context
from engine import exception
from engine import flags
from engine import log as logging
from engine import manager
from engine import rpc
from engine import utils
from engine.volume import volume_types

LOG = logging.getLogger('engine.volume.manager')

FLAGS = flags.FLAGS
flags.DEFINE_string('storage_availability_zone', 'engine',
                    'availability zone of this service')
flags.DEFINE_string('volume_driver', 'engine.volume.driver.ISCSIDriver',
                    'Driver to use for volume creation')
# NOTE(review): the help text below reads as inverted ('if True, will not
# discover') relative to the flag name -- runtime string left untouched here;
# confirm intended polarity against the code that reads this flag.
flags.DEFINE_boolean('use_local_volumes', True,
                     'if True, will not discover local volumes')
flags.DEFINE_boolean('volume_force_update_capabilities', False,
                     'if True will force update capabilities on each check')


class VolumeManager(manager.SchedulerDependentManager):
    """Manages attachable block storage devices."""

    def __init__(self, volume_driver=None, *args, **kwargs):
        """Load the driver from the one specified in args, or from flags."""
        if not volume_driver:
            volume_driver = FLAGS.volume_driver
        self.driver = utils.import_object(volume_driver)
        super(VolumeManager, self).__init__(service_name='volume',
                                            *args, **kwargs)
        # NOTE(vish): Implementation specific db handling is done