from neutron_lib import constants as n_const from oslo_config import cfg from oslo_log import log as logging from oslo_utils import netutils import six from stevedore import driver from neutron._i18n import _ from neutron.common import constants as l3_constants from neutron.common import utils LOG = logging.getLogger(__name__) OPTS = [ cfg.StrOpt('pd_dhcp_driver', default='dibbler', help=_('Service to handle DHCPv6 Prefix delegation.')), ] class PrefixDelegation(object): def __init__(self, context, pmon, intf_driver, notifier, pd_update_cb, agent_conf): self.context = context self.pmon = pmon self.intf_driver = intf_driver self.notifier = notifier self.routers = {} self.pd_update_cb = pd_update_cb self.agent_conf = agent_conf self.pd_dhcp_driver = driver.DriverManager(
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Base class for classes that need modular database access."""

from oslo_config import cfg
from oslo_utils import importutils

# Dotted module path of the DB API implementation to load at runtime.
db_driver_opt = cfg.StrOpt('db_driver',
                           default='nova.db',
                           help='The driver to use for database access')

CONF = cfg.CONF
CONF.register_opt(db_driver_opt)


class Base(object):
    """DB driver is injected in the init method."""

    def __init__(self, db_driver=None):
        """Load the DB backend module and expose it as ``self.db``.

        :param db_driver: optional dotted module path of the DB backend;
            any falsy value falls back to the ``db_driver`` config option.
        """
        super(Base, self).__init__()
        # Explicit argument wins; empty/None falls back to configuration.
        self.db = importutils.import_module(db_driver or CONF.db_driver)
title="Application Catalog Options") service_broker_group = cfg.OptGroup(name="service_broker", title="Service Broker Options") artifacts_group = cfg.OptGroup(name="artifacts", title="Glance Artifacts Options") orchestration_group = cfg.OptGroup(name='orchestration', title='Orchestration Service Options') ApplicationCatalogGroup = [ # Application catalog tempest configuration cfg.StrOpt("region", default="", help="The application_catalog region name to use. If empty, " "the value of identity.region is used instead. " "If no such region is found in the service catalog, " "the first found one is used."), cfg.StrOpt("linux_image", default="debian-8-m-agent.qcow2", help="Image for linux services"), cfg.StrOpt("catalog_type", default="application-catalog", help="Catalog type of Application Catalog."), cfg.StrOpt("endpoint_type", default="publicURL", choices=["publicURL", "adminURL", "internalURL"], help="The endpoint type for application catalog service."), cfg.IntOpt("build_interval", default=3, help="Time in seconds between application catalog"
import pytz from dateutil import parser import jinja2 from oslo_config import cfg import openstack import requests import yaml PROJECT_NAME = 'openstack-sandbox-manager' CONF = cfg.CONF opts = [ cfg.BoolOpt('debug', required=False, default=False), cfg.BoolOpt('dry-run', required=False, default=False), cfg.IntOpt('threshold', help='Threshold in days', default=60), cfg.StrOpt('cloud', help='Managed cloud', default='service'), cfg.StrOpt('mailgun-api', default='https://api.mailgun.net/v3/betacloud.io/messages', required=False), cfg.StrOpt('mailgun-from', default='Betacloud Operations <*****@*****.**>', required=False), cfg.StrOpt('mailgun-key', required=False), cfg.StrOpt('openstack-project', help='OpenStack project', required='True') ] CONF.register_cli_opts(opts) CONF(sys.argv[1:], project=PROJECT_NAME) if CONF.debug: level = logging.DEBUG else:
# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import os from oslo_config import cfg versions_opts = [ cfg.StrOpt('public_endpoint', help="Public url to use for versions endpoint. The default " "is None, which will use the request's host_url " "attribute to populate the URL base. If Cinder is " "operating behind a proxy, you will want to change " "this to represent the proxy's URL."), ] CONF = cfg.CONF CONF.register_opts(versions_opts) def get_view_builder(req): base_url = CONF.public_endpoint or req.application_url return ViewBuilder(base_url) class ViewBuilder(object): def __init__(self, base_url):
from oslo_utils import timeutils import six from six.moves import range import six.moves.urllib.parse as urlparse from nova import exception from nova.i18n import _LE, _LI, _LW import nova.image.download as image_xfers from nova import objects from nova import signature_utils glance_opts = [ cfg.StrOpt( 'host', default='$my_ip', # TODO(sdague): remove in N deprecated_for_removal=True, help='DEPRECATED: Glance server hostname or IP address. ' 'Use the "api_servers" option instead.'), cfg.IntOpt( 'port', default=9292, min=1, max=65535, # TODO(sdague): remove in N deprecated_for_removal=True, help='DEPRECATED: Glance server port. Use the "api_servers" ' 'option instead.'), cfg.StrOpt( 'protocol', default='http',
from oslo_log import log from oslo_utils import importutils from oslo_utils import timeutils import six from manila import exception from manila.i18n import _ from manila.share import driver from manila.share.drivers import generic from manila.share import utils LOG = log.getLogger(__name__) share_opts = [ cfg.StrOpt('lvm_share_export_root', default='$state_path/mnt', help='Base folder where exported shares are located.'), cfg.StrOpt('lvm_share_export_ip', deprecated_for_removal=True, deprecated_reason='Use lvm_share_export_ips instead.', help='IP to be added to export string.'), cfg.ListOpt('lvm_share_export_ips', help='List of IPs to export shares.'), cfg.IntOpt('lvm_share_mirrors', default=0, help='If set, create LVMs with multiple mirrors. Note that ' 'this requires lvm_mirrors + 2 PVs with available space.'), cfg.StrOpt('lvm_share_volume_group', default='lvm-shares', help='Name for the VG that will contain exported shares.'), cfg.ListOpt('lvm_share_helpers',
from magnum.common import paths from magnum.i18n import _ from magnum.i18n import _LW LOG = logging.getLogger(__name__) KUBE_SECURE_PORT = '6443' KUBE_INSECURE_PORT = '8080' DOCKER_PORT = '2376' template_def_opts = [ cfg.StrOpt('k8s_atomic_template_path', default=paths.basedir_def('templates/kubernetes/' 'kubecluster.yaml'), deprecated_name='template_path', deprecated_group='bay_heat', help=_( 'Location of template to build a k8s cluster on atomic.')), cfg.StrOpt('k8s_coreos_template_path', default=paths.basedir_def('templates/kubernetes/' 'kubecluster-coreos.yaml'), help=_( 'Location of template to build a k8s cluster on CoreOS.')), cfg.StrOpt('etcd_discovery_service_endpoint_format', default='https://discovery.etcd.io/new?size=%(size)d', help=_('Url for etcd public discovery endpoint.')), cfg.StrOpt('swarm_atomic_template_path', default=paths.basedir_def('templates/swarm/' 'swarmcluster.yaml'), help=_('Location of template to build a swarm '
from ironic_inspector.common.i18n import _ MIN_VERSION_HEADER = 'X-OpenStack-Ironic-Inspector-API-Minimum-Version' MAX_VERSION_HEADER = 'X-OpenStack-Ironic-Inspector-API-Maximum-Version' VERSION_HEADER = 'X-OpenStack-Ironic-Inspector-API-Version' VALID_ADD_PORTS_VALUES = ('all', 'active', 'pxe') VALID_KEEP_PORTS_VALUES = ('all', 'present', 'added') VALID_STORE_DATA_VALUES = ('none', 'swift') FIREWALL_OPTS = [ cfg.BoolOpt('manage_firewall', default=True, help=_('Whether to manage firewall rules for PXE port.')), cfg.StrOpt('dnsmasq_interface', default='br-ctlplane', help=_('Interface on which dnsmasq listens, the default is for ' 'VM\'s.')), cfg.IntOpt('firewall_update_period', default=15, help=_('Amount of time in seconds, after which repeat periodic ' 'update of firewall.')), cfg.StrOpt('firewall_chain', default='ironic-inspector', help=_('iptables chain name to use.')), cfg.ListOpt('ethoib_interfaces', default=[], help=_('List of Etherent Over InfiniBand interfaces ' 'on the Inspector host which are used for physical ' 'access to the DHCP network. Multiple interfaces would ' 'be attached to a bond or bridge specified in ' 'dnsmasq_interface. The MACs of the InfiniBand nodes '
import eventlet.backdoor import greenlet from oslo_config import cfg from glance.openstack.common._i18n import _LI help_for_backdoor_port = ( "Acceptable values are 0, <port>, and <start>:<end>, where 0 results " "in listening on a random tcp port number; <port> results in listening " "on the specified port number (and not enabling backdoor if that port " "is in use); and <start>:<end> results in listening on the smallest " "unused port number within the specified range of port numbers. The " "chosen port is displayed in the service's log file.") eventlet_backdoor_opts = [ cfg.StrOpt('backdoor_port', help="Enable eventlet backdoor. %s" % help_for_backdoor_port) ] CONF = cfg.CONF CONF.register_opts(eventlet_backdoor_opts) LOG = logging.getLogger(__name__) def list_opts(): """Entry point for oslo-config-generator. """ return [(None, copy.deepcopy(eventlet_backdoor_opts))] class EventletBackdoorConfigValueError(Exception): def __init__(self, port_range, help_msg, ex):
import fnmatch
import os

from jsonpath_rw_ext import parser
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import six
import yaml

from ceilometer.event.storage import models
from ceilometer.i18n import _, _LI

# Options governing how incoming notifications are turned into events.
OPTS = [
    cfg.StrOpt('definitions_cfg_file',
               default="event_definitions.yaml",
               help="Configuration file for event definitions."),
    cfg.BoolOpt('drop_unmatched_notifications',
                default=False,
                help='Drop notifications if no event definition matches. '
                     '(Otherwise, we convert them with just the default traits)'),
    cfg.MultiStrOpt('store_raw',
                    default=[],
                    help='Store the raw notification for select priority '
                         'levels (info and/or error). By default, raw details are '
                         'not captured.')
]

cfg.CONF.register_opts(OPTS, group='event')

LOG = log.getLogger(__name__)
service_opts = [ cfg.IntOpt('report_interval', default=10, help='Interval, in seconds, between nodes reporting state ' 'to datastore'), cfg.IntOpt('periodic_interval', default=60, help='Interval, in seconds, between running periodic tasks'), cfg.IntOpt('periodic_fuzzy_delay', default=60, help='Range, in seconds, to randomly delay when starting the' ' periodic task scheduler to reduce stampeding.' ' (Disable by setting to 0)'), cfg.StrOpt('osapi_volume_listen', default="0.0.0.0", help='IP address on which OpenStack Volume API listens'), cfg.PortOpt('osapi_volume_listen_port', default=8776, help='Port on which OpenStack Volume API listens'), cfg.IntOpt('osapi_volume_workers', help='Number of workers for OpenStack Volume API service. ' 'The default is equal to the number of CPUs available.'), ] profiler_opts = [ cfg.BoolOpt("profiler_enabled", default=False, help=_('If False fully disable profiling feature.')), cfg.BoolOpt("trace_sqlalchemy", default=False,
"""Defines interface for DB access.""" from collections import namedtuple from oslo_config import cfg from oslo_db import api as db_api db_opts = [ cfg.StrOpt('db_backend', default='sqlalchemy', help='The backend to use for DB.'), ] CONF = cfg.CONF CONF.register_opts(db_opts) # entrypoint namespace for db backend BACKEND_MAPPING = {'sqlalchemy': 'craton.db.sqlalchemy.api'} IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=BACKEND_MAPPING, lazy=True) def get_user_info(context, user): return IMPL.get_user_info(context, user) # Devices Blame = namedtuple('Blame', ['source', 'variable']) def device_blame_variables(device, keys=None):
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import glanceclient
from oslo_config import cfg

from ceilometer.agent import plugin_base
from ceilometer import keystone_client

SERVICE_OPTS = [
    cfg.StrOpt('glance',
               default='image',
               help='Glance service type.'),
]


class ImagesDiscovery(plugin_base.DiscoveryBase):
    """Discovery plugin backed by the Glance image API."""

    def __init__(self, conf):
        """Create a v2 Glance client from the credentials held in *conf*.

        :param conf: service configuration carrying ``service_credentials``
            (region/interface) and ``service_types.glance``.
        """
        super(ImagesDiscovery, self).__init__(conf)
        creds = conf.service_credentials
        # The keystone session supplies auth; region + interface select
        # the endpoint for the configured glance service type.
        self.glance_client = glanceclient.Client(
            version='2',
            session=keystone_client.get_session(conf),
            region_name=creds.region_name,
            interface=creds.interface,
            service_type=conf.service_types.glance)
from oslo_config import cfg
from oslo_utils import importutils

from neutron.common import repos

# TODO(ihrachyshka): maintain separate HEAD files per branch
HEADS_FILENAME = 'HEADS'
CURRENT_RELEASE = "liberty"
MIGRATION_BRANCHES = ('expand', 'contract')

mods = repos.NeutronModules()
# Alembic-compatible names for every installed neutron service repo.
VALID_SERVICES = list(map(mods.alembic_name, mods.installed_list()))

_core_opts = [
    cfg.StrOpt('core_plugin',
               default='',
               help=_('Neutron plugin provider module')),
    cfg.ListOpt('service_plugins',
                default=[],
                help=_("The service plugins Neutron will use")),
    cfg.StrOpt('service',
               choices=VALID_SERVICES,
               help=_("The advanced service to execute the command against. "
                      "Can be one of '%s'.") % "', '".join(VALID_SERVICES))
]

_quota_opts = [
    cfg.StrOpt('quota_driver',
               default='',
               help=_('Neutron quota driver class')),
]
from oslo_config import cfg from nova.conf import paths xvp_group = cfg.OptGroup('xvp', title='XVP options', help=""" Configuration options for XVP. xvp (Xen VNC Proxy) is a proxy server providing password-protected VNC-based access to the consoles of virtual machines hosted on Citrix XenServer. """) xvp_opts = [ cfg.StrOpt('console_xvp_conf_template', default=paths.basedir_def('nova/console/xvp.conf.template'), deprecated_group='DEFAULT', help='XVP conf template'), cfg.StrOpt('console_xvp_conf', default='/etc/xvp.conf', deprecated_group='DEFAULT', help='Generated XVP conf file'), cfg.StrOpt('console_xvp_pid', default='/var/run/xvp.pid', deprecated_group='DEFAULT', help='XVP master process pid file'), cfg.StrOpt('console_xvp_log', default='/var/log/xvp.log', deprecated_group='DEFAULT', help='XVP log file'), cfg.PortOpt('console_xvp_multiplex_port', default=5900,
from oslo_config import cfg from oslo_log import log import six from manila import exception from manila.i18n import _ from manila.i18n import _LI from manila.share import driver from manila.share.drivers.hpe import hpe_3par_mediator from manila.share import share_types from manila import utils HPE3PAR_OPTS = [ cfg.StrOpt('hpe3par_api_url', default='', help="3PAR WSAPI Server Url like " "https://<3par ip>:8080/api/v1", deprecated_name='hp3par_api_url'), cfg.StrOpt('hpe3par_username', default='', help="3PAR username with the 'edit' role", deprecated_name='hp3par_username'), cfg.StrOpt('hpe3par_password', default='', help="3PAR password for the user specified in hpe3par_username", secret=True, deprecated_name='hp3par_password'), cfg.StrOpt('hpe3par_san_ip', default='', help="IP address of SAN controller", deprecated_name='hp3par_san_ip'),
""" import hashlib import hmac from oslo_config import cfg import six from ceilometer import utils OPTS = [ cfg.StrOpt('metering_secret', secret=True, default='change this or be hacked', help='Secret value for signing metering messages.', deprecated_opts=[cfg.DeprecatedOpt("metering_secret", "DEFAULT"), cfg.DeprecatedOpt("metering_secret", "publisher_rpc")] ), ] cfg.CONF.register_opts(OPTS, group="publisher") def compute_signature(message, secret): """Return the signature for a message dictionary.""" digest_maker = hmac.new(secret, '', hashlib.sha256) for name, value in utils.recursive_keypairs(message): if name == 'message_signature': # Skip any existing signature value, which would not have # been part of the original message.
cfg.IPOpt('bind_host', default='0.0.0.0', help=_('Address to bind the server. Useful when ' 'selecting a particular network interface.'), deprecated_group='DEFAULT'), cfg.PortOpt('bind_port', default=8004, help=_('The port on which the server will listen.'), deprecated_group='DEFAULT'), cfg.IntOpt('backlog', default=4096, help=_("Number of backlog requests " "to configure the socket with."), deprecated_group='DEFAULT'), cfg.StrOpt('cert_file', help=_("Location of the SSL certificate file " "to use for SSL mode."), deprecated_group='DEFAULT'), cfg.StrOpt('key_file', help=_("Location of the SSL key file to use " "for enabling SSL mode."), deprecated_group='DEFAULT'), cfg.IntOpt('workers', min=0, default=0, help=_("Number of workers for Heat service. " "Default value 0 means, that service will start number " "of workers equal number of cores on server."), deprecated_group='DEFAULT'), cfg.IntOpt('max_header_line', default=16384, help=_('Maximum line size of message headers to be accepted. '
} } } taas_quota_opts = [ cfg.IntOpt('quota_tap_service', default=1, help=_('Number of Tap Service instances allowed per tenant')), cfg.IntOpt('quota_tap_flow', default=10, help=_('Number of Tap flows allowed per tenant')) ] cfg.CONF.register_opts(taas_quota_opts, 'QUOTAS') TaasOpts = [ cfg.StrOpt('driver', default='', help=_("Name of the TaaS Driver")), cfg.BoolOpt('enabled', default=False, help=_("Enable TaaS")), cfg.IntOpt('vlan_range_start', default=3900, help=_("Starting range of TAAS VLAN IDs")), cfg.IntOpt('vlan_range_end', default=4000, help=_("End range of TAAS VLAN IDs")), ] cfg.CONF.register_opts(TaasOpts, 'taas') class Taas(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Neutron Tap as a Service"
# under the License.

from collections import OrderedDict

from oslo_config import cfg
from oslo_log import log as logging
from six.moves import configparser

from glance.common import exception
from glance import i18n

_ = i18n._
_LE = i18n._LE

# Options describing how Swift account references are configured.
swift_opts = [
    cfg.StrOpt('default_swift_reference',
               default="ref1",
               help=_('The reference to the default swift account/backing '
                      'store parameters to use for adding new images.')),
    cfg.StrOpt('swift_store_auth_address',
               help=_('The address where the Swift authentication service '
                      'is listening.(deprecated)')),
    cfg.StrOpt('swift_store_user', secret=True,
               help=_('The user to authenticate against the Swift '
                      'authentication service (deprecated)')),
    cfg.StrOpt('swift_store_key', secret=True,
               help=_('Auth key for the user authenticating against the '
                      'Swift authentication service. (deprecated)')),
    cfg.StrOpt('swift_store_config_file', secret=True,
               # Fixed implicit string concatenation that previously
               # rendered "account(s)configs." with no separating space.
               help=_('The config file that has the swift account(s) '
                      'configs.')),
]
from oslo_utils import fileutils
from oslo_utils import imageutils
from oslo_utils import timeutils
from oslo_utils import units
import psutil

from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume import throttling
from cinder.volume import utils as volume_utils

LOG = logging.getLogger(__name__)

image_helper_opts = [
    cfg.StrOpt('image_conversion_dir',
               default='$state_path/conversion',
               help='Directory used for temporary storage '
                    'during image conversion'),
]

CONF = cfg.CONF
CONF.register_opts(image_helper_opts)

# Resource caps for qemu-img child processes: 8s of CPU time and a
# 1 GiB address space, so a malicious image cannot exhaust the host.
QEMU_IMG_LIMITS = processutils.ProcessLimits(cpu_time=8,
                                             address_space=1 * units.Gi)

# NOTE(abhishekk): qemu-img convert command supports raw, qcow2, qed,
# vdi, vmdk, vhd and vhdx disk-formats but glance doesn't support qed
# disk-format.
# Ref: http://docs.openstack.org/image-guide/convert-images.html
VALID_DISK_FORMATS = ('raw', 'vmdk', 'vdi', 'qcow2',
                      'vhd', 'vhdx', 'parallels')
import os import sys from kuryr.lib._i18n import _ from kuryr.lib import config as lib_config from oslo_config import cfg import logging from kuryr_kubernetes import version LOG = logging.getLogger(__name__) kuryr_k8s_opts = [ cfg.StrOpt('pybasedir', help=_('Directory where Kuryr-kubernetes python module is ' 'installed.'), default=os.path.abspath( os.path.join(os.path.dirname(__file__), '../../'))), ] daemon_opts = [ cfg.BoolOpt('daemon_enabled', help=_('Enable CNI Daemon configuration.'), default=True, deprecated_for_removal=True, deprecated_reason="Deployment without kuryr-daemon is now " "deprecated.", deprecated_since="Rocky"), cfg.StrOpt('bind_address', help=_('Bind address for CNI daemon HTTP server. It is ' 'recommened to allow only local connections.'),
from neutron.db import agents_db from neutron.db import agentschedulers_db as as_db from neutron.db import model_base from neutron_dynamic_routing._i18n import _ from neutron_dynamic_routing._i18n import _LW from neutron_dynamic_routing.extensions import bgp_dragentscheduler as bgp_dras_ext # noqa from neutron_dynamic_routing.services.bgp.common import constants as bgp_consts LOG = logging.getLogger(__name__) BGP_DRAGENT_SCHEDULER_OPTS = [ cfg.StrOpt( 'bgp_drscheduler_driver', default='neutron_dynamic_routing.services.bgp.scheduler' '.bgp_dragent_scheduler.ChanceScheduler', help=_('Driver used for scheduling BGP speakers to BGP DrAgent')) ] cfg.CONF.register_opts(BGP_DRAGENT_SCHEDULER_OPTS) class BgpSpeakerDrAgentBinding(model_base.BASEV2): """Represents a mapping between BGP speaker and BGP DRAgent""" __tablename__ = 'bgp_speaker_dragent_bindings' bgp_speaker_id = sa.Column(sa.String(length=36), sa.ForeignKey("bgp_speakers.id", ondelete='CASCADE'),
from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder import objects from cinder import utils from cinder.volume import configuration from cinder.volume.drivers import remotefs as remotefs_drv VERSION = '1.1.0' LOG = logging.getLogger(__name__) volume_opts = [ cfg.StrOpt('smbfs_shares_config', default=r'C:\OpenStack\smbfs_shares.txt', help='File with the list of available smbfs shares.'), cfg.StrOpt('smbfs_default_volume_format', default='vhd', choices=['vhd', 'vhdx'], help=('Default format that will be used when creating volumes ' 'if no volume format is specified.')), cfg.StrOpt('smbfs_mount_point_base', default=r'C:\OpenStack\_mnt', help=('Base dir containing mount points for smbfs shares.')), cfg.DictOpt('smbfs_pool_mappings', default={}, help=('Mappings between share locations and pool names. ' 'If not specified, the share names will be used as ' 'pool names. Example: ' '//addr/share:pool_name,//addr/share2:pool_name2')),
# License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import yaml from tacker.agent.linux import utils from tacker.common import log from tacker.openstack.common import jsonutils from tacker.openstack.common import log as logging from tacker.vm.mgmt_drivers import abstract_driver from tacker.vm.mgmt_drivers import constants as mgmt_constants LOG = logging.getLogger(__name__) OPTS = [ cfg.StrOpt('user', default='root', help=_('user name to login openwrt')), cfg.StrOpt('password', default='', help=_('password to login openwrt')), ] cfg.CONF.register_opts(OPTS, 'openwrt') class DeviceMgmtOpenWRT(abstract_driver.DeviceMGMTAbstractDriver): def get_type(self): return 'openwrt' def get_name(self): return 'openwrt' def get_description(self): return 'Tacker DeviceMgmt OpenWRT Driver'
import logging import sys import textwrap import warnings import yaml from oslo_config import cfg from oslo_serialization import jsonutils import stevedore from oslo_policy import policy LOG = logging.getLogger(__name__) GENERATOR_OPTS = [ cfg.StrOpt('output-file', help='Path of the file to write to. Defaults to stdout.'), ] RULE_OPTS = [ cfg.MultiStrOpt('namespace', help='Option namespace(s) under "oslo.policy.policies" in ' 'which to query for options.'), cfg.StrOpt('format', help='Desired format for the output.', default='yaml', choices=['json', 'yaml']), ] ENFORCER_OPTS = [ cfg.StrOpt('namespace', help='Option namespace under "oslo.policy.enforcer" in '
import sys

import memcache
from oslo_config import cfg
from oslo_log import log as logging
import six

from stackalytics.processor import config
from stackalytics.processor import utils

CONF = cfg.CONF
LOG = logging.getLogger(__name__)

OPTS = [
    cfg.BoolOpt('restore',
                short='r',
                help='Restore data into memcached'),
    cfg.StrOpt('file',
               short='f',
               help='The name of file to store data'),
    # BUG FIX: this was declared as StrOpt with an integer default (0).
    # The compression threshold is numeric (python-memcached's
    # min_compress_len is an int), so IntOpt matches both the default
    # value and the consumer.
    cfg.IntOpt('min-compress-len',
               default=0,
               short='m',
               help='The threshold length to kick in auto-compression'),
]

# Keys stored as a single memcached entry each.
SINGLE_KEYS = [
    'module_groups', 'project_types', 'repos', 'releases', 'companies',
    'last_update_members_date', 'last_member_index',
    'runtime_storage_update_time'
]
# Keys whose records are sharded across numbered memcached entries.
ARRAY_KEYS = ['record', 'user']
BULK_READ_SIZE = 64
MEMCACHED_URI_PREFIX = r'^memcached:\/\/'
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ DHCP_AGENT_OPTS = [ cfg.IntOpt('resync_interval', default=5, help=_("The DHCP agent will resync its state with Neutron to " "recover from any transient notification or RPC errors. " "The interval is number of seconds between attempts.")), cfg.StrOpt('dhcp_driver', default='neutron.agent.linux.dhcp.Dnsmasq', help=_("The driver used to manage the DHCP server.")), cfg.BoolOpt('enable_isolated_metadata', default=False, help=_("The DHCP server can assist with providing metadata " "support on isolated networks. Setting this value to " "True will cause the DHCP server to append specific " "host routes to the DHCP request. The metadata service " "will only be activated when the subnet does not " "contain any router port. The guest instance must be " "configured to request host routes via DHCP (Option " "121). This option doesn't have any effect when " "force_metadata is set to True.")), cfg.BoolOpt('force_metadata', default=False, help=_("In some cases the Neutron router is not present to " "provide the metadata IP but the DHCP server can be " "used to provide this info. Setting this value will "
def set_instance_attestation_status(self, instance):
    """Query the attestation server for *instance* and record the result.

    POSTs the instance/host identifiers to the trusted-computing
    attestation API, parses the returned SAML assertion via
    ``self.verify_and_parse_saml``, and stores ``measurement_policy`` /
    ``measurement_status`` in ``instance['metadata']`` before saving.

    Best-effort: any exception is logged and ``""`` is returned; the
    success path returns ``None``.
    """
    try:
        # Option schema for the attestation server connection.
        # NOTE(review): these opts are registered on every call;
        # presumably oslo.config tolerates the duplicate registration —
        # hoisting to module scope would be cleaner.
        attestation_opts = [
            cfg.StrOpt('attestation_server_ip',
                       help='Attestation server IP'),
            cfg.StrOpt('attestation_server_port',
                       help='Attestation server port'),
            cfg.StrOpt('attestation_api_url',
                       help='Attestation server API url'),
            cfg.StrOpt('attestation_auth_blob',
                       help='Attestation server authentication details'),
            cfg.StrOpt('attestation_server_ca_file',
                       help='Attestation server SSL certificate file location'),
        ]
        # Read the configuration params from nova.conf
        CONF_ATTEST = cfg.CONF
        trust_group = cfg.OptGroup(name='trusted_computing',
                                   title='Trust parameters')
        CONF_ATTEST.register_group(trust_group)
        CONF_ATTEST.register_opts(attestation_opts, group=trust_group)
        host = CONF.trusted_computing.attestation_server_ip
        port = CONF.trusted_computing.attestation_server_port
        attestation_url = CONF.trusted_computing.attestation_api_url
        # auth_blob is expected as "user:password" for HTTP Basic auth.
        auth_blob = CONF.trusted_computing.attestation_auth_blob
        userAndPass = b64encode(auth_blob).decode("ascii")
        # Setup the header & body for the request
        headers = {
            'Authorization': 'Basic %s' % userAndPass,
            'Accept': 'application/samlassertion+xml',
            'Content-Type': 'application/json'}
        params = None
        container_name = None
        # Docker containers are registered with the attestation service
        # under a 'nova-<uuid>' name; regular VMs use the bare uuid.
        if CONF.compute_driver == "novadocker.virt.docker.DockerDriver":
            container_name = 'nova-' + instance.uuid
            params = {'host_name': instance.host,
                      'vm_instance_id': container_name}
        else:
            params = {'host_name': instance.host,
                      'vm_instance_id': instance.uuid}
        # Setup the SSL context for certificate verification
        if hasattr(ssl, 'SSLContext') and CONF.trusted_computing.attestation_server_ca_file:
            LOG.info("Using SSL context HTTPS client connection to attestation server with SSL certificate verification")
            as_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
            as_context.verify_mode = ssl.CERT_REQUIRED
            as_context.check_hostname = True
            as_context.load_verify_locations(
                CONF.trusted_computing.attestation_server_ca_file)
            c = httplib.HTTPSConnection(host, port=port, context=as_context)
        else:
            # Fallback client for Python builds without ssl.SSLContext.
            LOG.info("Using socket HTTPS client connection to attestation server with SSL certificate verification")
            c = HTTPSClientAuthConnection(
                host, port, key_file=None, cert_file=None,
                ca_file=CONF.trusted_computing.attestation_server_ca_file)
        c.request('POST', attestation_url, jsonutils.dumps(params), headers)
        res = c.getresponse()
        res_data = res.read()
        # Parse the SAML assertion to get the relevant details
        policy_name, policy_status = self.verify_and_parse_saml(res_data)
        if policy_name == 'na':
            # Retry once, keyed on this compute host's IP instead of the
            # instance's host name.
            if CONF.compute_driver == "novadocker.virt.docker.DockerDriver":
                params = {'host_name': CONF.my_ip,
                          'vm_instance_id': container_name}
            else:
                params = {'host_name': CONF.my_ip,
                          'vm_instance_id': instance.uuid}
            c.request('POST', attestation_url, jsonutils.dumps(params),
                      headers)
            res = c.getresponse()
            res_data = res.read()
            policy_name, policy_status = self.verify_and_parse_saml(res_data)
        # If policy_name is not available do not try again as VM might be
        # non-measured. Once response from CIT clearly mentions reason for
        # failure we can add some more logic here. For now do not retry.
        if policy_name == 'na':
            policy_name = 'non-measured'
        instance['metadata']['measurement_policy'] = policy_name
        instance['metadata']['measurement_status'] = policy_status
        instance.save()
    except Exception as e:
        LOG.error("Exception retrieving the VM attestation details")
        LOG.error(e)
        return ""