from canary.drivers.cassandradriver import CassandraStorageDriver from canary.util import canonicalize conf = cfg.CONF conf(project='canary', prog='canary', args=[]) log.setup('canary') LOG = log.getLogger(__name__) _CASSANDRA_OPTIONS = [ cfg.StrOpt('keyspace', default='canary', help='Keyspace for all queries made in session'), ] CASSANDRA_GROUP = cfg.OptGroup(name='cassandra', title='cassandra options') _CANARY_OPTIONS = [ cfg.IntOpt('interval', default='120', help='Retrieve activities for the last x seconds'), ] CANARY_GROUP = cfg.OptGroup(name='canary', title='canary options') conf.register_opts(_CASSANDRA_OPTIONS, group=CASSANDRA_GROUP) conf.register_opts(_CANARY_OPTIONS, group=CANARY_GROUP) class ItemResource(object): def on_get(self, req, resp):
# limitations under the License. """Handles all requests to the conductor service.""" from oslo.config import cfg from sahara.conductor import manager from sahara.conductor import resource as r from sahara.openstack.common import log as logging conductor_opts = [ cfg.BoolOpt('use_local', default=True, help='Perform sahara-conductor operations locally.'), ] conductor_group = cfg.OptGroup(name='conductor', title='Conductor Options') CONF = cfg.CONF CONF.register_group(conductor_group) CONF.register_opts(conductor_opts, conductor_group) LOG = logging.getLogger(__name__) def _get_id(obj): """Return object id. Allows usage of both an object or an object's ID as a parameter when dealing with relationships. """ try:
default=None, help=_("Location of the SSL key file to use " "for enabling SSL mode."), deprecated_group='DEFAULT'), cfg.IntOpt('workers', default=0, help=_("Number of workers for Heat service."), deprecated_group='DEFAULT'), cfg.IntOpt('max_header_line', default=16384, help=_('Maximum line size of message headers to be accepted. ' 'max_header_line may need to be increased when using ' 'large tokens (typically those generated by the ' 'Keystone v3 API with big service catalogs).')), ] api_group = cfg.OptGroup('heat_api') cfg.CONF.register_group(api_group) cfg.CONF.register_opts(api_opts, group=api_group) api_cfn_opts = [ cfg.StrOpt('bind_host', default='0.0.0.0', help=_('Address to bind the server. Useful when ' 'selecting a particular network interface.'), deprecated_group='DEFAULT'), cfg.IntOpt('bind_port', default=8000, help=_('The port on which the server will listen.'), deprecated_group='DEFAULT'), cfg.IntOpt('backlog', default=4096,
from raksha.openstack.common import context as req_context from raksha.openstack.common.gettextutils import _ from raksha.openstack.common import log as logging from raksha.openstack.common import rpc LOG = logging.getLogger(__name__) notification_topic_opt = cfg.ListOpt( 'topics', default=[ 'notifications', ], help='AMQP topic(s) used for openstack notifications') opt_group = cfg.OptGroup(name='rpc_notifier2', title='Options for rpc_notifier2') CONF = cfg.CONF CONF.register_group(opt_group) CONF.register_opt(notification_topic_opt, opt_group) def notify(context, message): """Sends a notification via RPC""" if not context: context = req_context.get_admin_context() priority = message.get('priority', CONF.default_notification_level) priority = priority.lower() for topic in CONF.rpc_notifier2.topics: topic = '%s.%s' % (topic, priority) try:
transport mechanism. """ from oslo.config import cfg import simplejson as json import zmq import meniscus.config as config from meniscus import env _LOG = env.get_logger(__name__) # ZMQ configuration options _ZMQ_GROUP = cfg.OptGroup( name='zmq_in', title='ZeroMQ Input Options') config.get_config().register_group(_ZMQ_GROUP) _ZMQ_OPTS = [ cfg.ListOpt('zmq_upstream_hosts', default=['127.0.0.1:5000'], help='list of upstream host:port pairs to poll for ' 'zmq messages') ] config.get_config().register_opts(_ZMQ_OPTS, group=_ZMQ_GROUP) try: config.init_config() except config.cfg.ConfigFilesNotFoundError as ex:
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg mgm_group = cfg.OptGroup('mgm', 'Libra Pool Manager options') cfg.CONF.register_group(mgm_group) cfg.CONF.register_opts([ cfg.IntOpt('az', required=True, help='The az the nodes and IPs will reside in (to be ' 'passed to the API server'), cfg.StrOpt('pid', default='/var/run/libra/libra_mgm.pid', help='PID file'), cfg.StrOpt('node_basename', help='prepend the name of all nodes with this'), cfg.StrOpt( 'nova_auth_url', required=True, help='the auth URL for the Nova API'), cfg.StrOpt('nova_user', required=True,
from oslo.config import cfg import addons src = cfg.OptGroup(name='src', title='Credentials and general config for source cloud') src_opts = [ cfg.StrOpt('type', default='os', help='os - OpenStack Cloud'), cfg.StrOpt('host', default='-', help='ip-address controller for cloud'), cfg.StrOpt('ssh_host', default='', help='ip-address of cloud node for ssh connect'), cfg.StrOpt('ext_cidr', default='', help='external network CIDR'), cfg.StrOpt('user', default='-', help='user for access to API'), cfg.StrOpt('password', default='-', help='password for access to API'), cfg.StrOpt('tenant', default='-', help='tenant for access to API'), cfg.StrOpt('temp', default='-', help='temporary directory on controller') ] dst = cfg.OptGroup(name='dst', title='Credentials and general ' 'config for destination cloud') dst_opts = [ cfg.StrOpt('type', default='os', help='os - OpenStack Cloud'), cfg.StrOpt('host', default='-', help='ip-address controller for cloud'), cfg.StrOpt('ssh_host', default='', help='ip-address of cloud node for ssh connect'), cfg.StrOpt('ext_cidr', default='', help='external network CIDR'), cfg.StrOpt('user', default='-', help='user for access to API'), cfg.StrOpt('password', default='-', help='password for access to API'),
from heat.openstack.common import log as logging import acitoolkit.acisession import acitoolkit.acitoolkit as aci from oslo.config import cfg logger = logging.getLogger(__name__) plugin_opts = [ cfg.StrOpt('apic_host', default='apic_host', help='apic_host'), cfg.StrOpt('apic_username', default='apic_username', help='apic_username'), cfg.StrOpt('apic_password', default='apic_password', help='apic_password'), cfg.StrOpt('apic_system_id', default='_openstack', help='apic_system_id'), ] conf = 'heat.common.config' apic_optgroup = cfg.OptGroup(name='apic_plugin', title='options for the apic plugin') cfg.CONF.register_group(apic_optgroup) cfg.CONF.register_opts(plugin_opts, apic_optgroup) cfg.CONF.import_group(apic_optgroup, conf) class APIC(resource.Resource): properties_schema = { 'Project': { 'Type': 'String', 'Default': '', 'Description': _('Project Name') }, 'Network': { 'Type': 'String',
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of Inspector abstraction for VMware vSphere""" from oslo.config import cfg from oslo.utils import units from oslo.vmware import api from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.compute.virt.vmware import vsphere_operations from ceilometer.openstack.common.gettextutils import _ opt_group = cfg.OptGroup(name='vmware', title='Options for VMware') OPTS = [ cfg.StrOpt('host_ip', default='', help='IP address of the VMware Vsphere host'), cfg.StrOpt('host_username', default='', help='Username of VMware Vsphere'), cfg.StrOpt('host_password', default='', help='Password of VMware Vsphere', secret=True), cfg.IntOpt('api_retry_count', default=10, help='Number of times a VMware Vsphere API must be retried'), cfg.FloatOpt('task_poll_interval', default=0.5,
module_logger = log print_logger = log CONF = cfg.CONF global_opts = [ cfg.StrOpt('file_hosts', default='/etc/hosts'), cfg.StrOpt('file_hostname', default='/etc/hostname'), cfg.StrOpt('self_config_file', default=os.path.sep.join([ os.path.split(os.path.realpath(__file__))[0], FileName.PATCHES_TOOL_CONFIG_FILE ])), ] CONF.register_opts(global_opts) default_group = cfg.OptGroup(name='DEFAULT', title='default config') default_opts = [ cfg.DictOpt('proxy_match_host', default=None), cfg.DictOpt('proxy_match_region', default=None), cfg.StrOpt('current_node', default='proxy001'), cfg.DictOpt('cascaded_add_route', default=None), cfg.DictOpt('cascaded_add_table_external_api', default=None), cfg.StrOpt('cascading_region', default='cascading.hybrid.huawei.com'), cfg.StrOpt('openstack_bak_path', default='/home/openstack_bak') ] CONF.register_group(default_group) CONF.register_opts(default_opts, default_group) env_group = cfg.OptGroup(name='ENV', title='environment for openstack') env_opts = [
LOG = log.getLogger(__name__) API_SERVICE_OPTS = [ cfg.IntOpt( 'port', default=8099, help='The port for the entropy API server.', ), cfg.StrOpt( 'host', default='0.0.0.0', help='The listen IP for the entropy API server.', ), ] opt_group = cfg.OptGroup(name='api', title='Options for the entropy-api service') cfg.CONF.register_group(opt_group) cfg.CONF.register_opts(API_SERVICE_OPTS, opt_group) CONF = cfg.CONF class DBHook(pecan.hooks.PecanHook): def __init__(self, db_conn): self.db_conn = db_conn def on_route(self, state): state.request.db_conn = self.db_conn PECAN_CONFIG = {
net_utils.load_net(trained_model, net) # net.load_from_npz(npz_fname) # net_utils.save_net(h5_fname, net) net.cuda() net.eval() print('load model succ...') t_det = Timer() t_total = Timer() pool = Pool(processes=1) ### setting for Http server #### # reading setting from conf opt_morestuff_group = cfg_oslo.OptGroup(name='morestuff', title='A More Complex Example') morestuff_opts = [ cfg_oslo.ListOpt('category', default=None, help=('A list of category')), ] CONF = cfg_oslo.CONF CONF.register_group(opt_morestuff_group) CONF.register_opts(morestuff_opts, opt_morestuff_group) CONF(default_config_files=['../webcam_img_capture/app.conf']) det_class = CONF.morestuff.category conn = sqlite3.connect('../webcam_img_capture/yolo.db')
# License for the specific language governing permissions and limitations # under the License. """Implementation of Inspector abstraction for XenAPI.""" from eventlet import timeout from oslo.config import cfg from oslo.utils import units try: import XenAPI as api except ImportError: api = None from ceilometer.compute.pollsters import util from monasca_agent.collector.virt import inspector as virt_inspector opt_group = cfg.OptGroup(name='xenapi', title='Options for XenAPI') xenapi_opts = [ cfg.StrOpt('connection_url', help='URL for connection to XenServer/Xen Cloud Platform'), cfg.StrOpt('connection_username', default='root', help='Username for connection to XenServer/Xen Cloud Platform'), cfg.StrOpt('connection_password', help='Password for connection to XenServer/Xen Cloud Platform', secret=True), cfg.IntOpt('login_timeout', default=10, help='Timeout in seconds for XenAPI login.'), ]
cfg.ListOpt('instance_type_extra_specs', default=[], help='A list of additional capabilities corresponding to ' 'instance_type_extra_specs for this compute ' 'host to advertise. Valid entries are name=value, pairs ' 'For example, "key1:val1, key2:val2"'), cfg.IntOpt('api_max_retries', default=60, help=('How many retries when a request does conflict.')), cfg.IntOpt('api_retry_interval', default=2, help=('How often to retry in seconds when a request ' 'does conflict')), ] ironic_group = cfg.OptGroup(name='ironic', title='Ironic Options') CONF = cfg.CONF CONF.register_group(ironic_group) CONF.register_opts(opts, ironic_group) _FIREWALL_DRIVER = "%s.%s" % (firewall.__name__, firewall.NoopFirewallDriver.__name__) _POWER_STATE_MAP = { ironic_states.POWER_ON: power_state.RUNNING, ironic_states.NOSTATE: power_state.NOSTATE, ironic_states.POWER_OFF: power_state.SHUTDOWN, }
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg adminapi_group = cfg.OptGroup('admin_api', 'Libra Admin API options') cfg.CONF.register_group(adminapi_group) cfg.CONF.register_opts( [ cfg.StrOpt('datadog_api_key', help='API key for datadog alerting'), cfg.StrOpt('datadog_app_key', help='Application key for datadog alerting'), cfg.StrOpt('datadog_env', default='unknown', help='Server enironment'), cfg.StrOpt('datadog_message_tail', help='Text to add at the end of a Datadog alert'), cfg.StrOpt('datadog_tags',
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg cfg.CONF.register_group( cfg.OptGroup(name='service:api', title="Configuration for API Service")) cfg.CONF.register_opts([ cfg.IntOpt( 'workers', default=None, help='Number of worker processes to spawn'), cfg.StrOpt('api-base-uri', default='http://127.0.0.1:9001/'), cfg.StrOpt('api_host', default='0.0.0.0', help='API Host'), cfg.IntOpt('api_port', default=9001, help='API Port Number'), cfg.StrOpt('api_paste_config', default='api-paste.ini', help='File name for the paste.deploy config for designate-api'), cfg.StrOpt('auth_strategy', default='keystone', help='The strategy to use for auth. Supports noauth or ' 'keystone'), cfg.BoolOpt('enable-api-v1', default=True),
""" import logging as sys_logging import os from eventlet.green import socket from oslo.config import cfg from heat.common import wsgi from heat.openstack.common import log as logging from heat.openstack.common import rpc DEFAULT_PORT = 8000 paste_deploy_group = cfg.OptGroup('paste_deploy') paste_deploy_opts = [cfg.StrOpt('flavor'), cfg.StrOpt('config_file')] bind_opts = [ cfg.IntOpt('bind_port', default=8000), cfg.StrOpt('bind_host', default='127.0.0.1') ] service_opts = [ cfg.IntOpt('report_interval', default=10, help='seconds between nodes reporting state to datastore'), cfg.IntOpt('periodic_interval', default=60, help='seconds between running periodic tasks'), cfg.StrOpt('ec2_listen',
help='Attestation server Cert file for Identity verification'), cfg.StrOpt('attestation_port', default='8443', help='Attestation server port'), cfg.StrOpt('attestation_api_url', default='/OpenAttestationWebServices/V1.0', help='Attestation web API URL'), cfg.StrOpt('attestation_auth_blob', help='Attestation authorization blob - must change'), cfg.IntOpt('attestation_auth_timeout', default=60, help='Attestation status cache valid period length'), ] CONF = cfg.CONF trust_group = cfg.OptGroup(name='trusted_computing', title='Trust parameters') CONF.register_group(trust_group) CONF.register_opts(trusted_opts, group=trust_group) class HTTPSClientAuthConnection(httplib.HTTPSConnection): """Class to make a HTTPS connection, with support for full client-based SSL Authentication """ def __init__(self, host, port, key_file, cert_file, ca_file, timeout=None): httplib.HTTPSConnection.__init__(self, host, key_file=key_file, cert_file=cert_file) self.host = host self.port = port
""" import copy import logging as sys_logging import os from eventlet.green import socket from oslo.config import cfg from heat.common import wsgi from heat.openstack.common import log as logging from heat.openstack.common import rpc DEFAULT_PORT = 8000 paste_deploy_group = cfg.OptGroup('paste_deploy') paste_deploy_opts = [ cfg.StrOpt('flavor', help=_("The flavor to use")), cfg.StrOpt('api_paste_config', default="api-paste.ini", help=_("The API paste config file to use")) ] service_opts = [ cfg.IntOpt('periodic_interval', default=60, help='seconds between running periodic tasks'), cfg.StrOpt('heat_metadata_server_url', default="", help='URL of the Heat metadata server'), cfg.StrOpt('heat_waitcondition_server_url',
api_opts = [ cfg.BoolOpt('enabled', default=False, help='Whether the V3 API is enabled or not'), cfg.ListOpt('extensions_blacklist', default=[], help='A list of v3 API extensions to never load. ' 'Specify the extension aliases here.'), cfg.ListOpt('extensions_whitelist', default=[], help='If the list is not empty then a v3 API extension ' 'will only be loaded if it exists in this list. Specify ' 'the extension aliases here.') ] api_opts_group = cfg.OptGroup(name='osapi_v3', title='API v3 Options') LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.register_group(api_opts_group) CONF.register_opts(api_opts, api_opts_group) # List of v3 API extensions which are considered to form # the core API and so must be present # TODO(cyeoh): Expand this list as the core APIs are ported to V3 API_V3_CORE_EXTENSIONS = set(['consoles', 'extensions', 'flavors', 'ips', 'limits', 'servers', 'server-metadata', 'keypairs', 'console-output']) class FaultWrapper(base_wsgi.Middleware):
cfg.IntOpt('check_device_interval', default=1, help='After Ironic has completed creating the partition table, ' 'it continues to check for activity on the attached iSCSI ' 'device status at this interval prior to copying the image' ' to the node, in seconds'), cfg.IntOpt('check_device_max_retries', default=20, help='The maximum number of times to check that the device is ' 'not accessed by another process. If the device is still ' 'busy after that, the disk partitioning will be treated as' ' having failed.'), ] CONF = cfg.CONF opt_group = cfg.OptGroup(name='disk_partitioner', title='Options for the disk partitioner') CONF.register_group(opt_group) CONF.register_opts(opts, opt_group) LOG = logging.getLogger(__name__) class DiskPartitioner(object): def __init__(self, device, disk_label='msdos', alignment='optimal'): """A convenient wrapper around the parted tool. :param device: The device path. :param disk_label: The type of the partition table. Valid types are: "bsd", "dvh", "gpt", "loop", "mac", "msdos", "pc98", or "sun". :param alignment: Set alignment for newly created partitions.
help='A list of additional capabilities corresponding to ' 'flavor_extra_specs for this compute ' 'host to advertise. Valid entries are name=value, pairs ' 'For example, "key1:val1, key2:val2"'), cfg.StrOpt('driver', default='nova.virt.baremetal.pxe.PXE', help='Baremetal driver back-end (pxe or tilera)'), cfg.StrOpt('power_manager', default='nova.virt.baremetal.ipmi.IPMI', help='Baremetal power management method'), cfg.StrOpt('tftp_root', default='/tftpboot', help='Baremetal compute node\'s tftp root path'), ] baremetal_group = cfg.OptGroup(name='baremetal', title='Baremetal Options') CONF = cfg.CONF CONF.register_group(baremetal_group) CONF.register_opts(opts, baremetal_group) CONF.import_opt('host', 'nova.netconf') CONF.import_opt('my_ip', 'nova.netconf') DEFAULT_FIREWALL_DRIVER = "%s.%s" % (firewall.__name__, firewall.NoopFirewallDriver.__name__) def _get_baremetal_node_by_instance_uuid(instance_uuid): ctx = nova_context.get_admin_context() node = db.bm_node_get_by_instance_uuid(ctx, instance_uuid) if node['service_host'] != CONF.host:
for action, prepoll, timeout, poll in option_names_and_defaults: nova_benchmark_opts.extend([ cfg.FloatOpt("nova_server_%s_prepoll_delay" % action, default=float(prepoll), help='Time to sleep after %s before polling for status' % action), cfg.FloatOpt("nova_server_%s_timeout" % action, default=float(timeout), help='Server %s timeout' % action), cfg.FloatOpt("nova_server_%s_poll_interval" % action, default=float(poll), help='Server %s poll interval' % action) ]) CONF = cfg.CONF benchmark_group = cfg.OptGroup(name='benchmark', title='benchmark options') CONF.register_group(benchmark_group) CONF.register_opts(nova_benchmark_opts, group=benchmark_group) class NovaScenario(base.Scenario): @base.atomic_action_timer('nova.list_servers') def _list_servers(self, detailed=True): """Returns user servers list.""" return self.clients("nova").servers.list(detailed) @base.atomic_action_timer('nova.boot_server') def _boot_server(self, server_name, image_id,
vgw_opts = [ cfg.DictOpt('vgw_url', default={ 'fs_vgw_url': 'http://162.3.114.107:8081/', 'vcloud_vgw_url': 'http://162.3.114.108:8081/', 'aws_vgw_url': 'http://162.3.114.109:8081/' }, help="These values will be used for upload/download image " "from vgw host."), cfg.StrOpt('store_file_dir', default='/home/upload', help='Directory used for temporary storage ' 'during migrate volume') ] vgw_group = cfg.OptGroup(name='vgw', title='Vgw Options') CONF = cfg.CONF CONF.register_opts(volume_opts) CONF.register_group(vgw_group) CONF.register_opts(vgw_opts, group="vgw") class LVMVolumeDriver(driver.VolumeDriver): """Executes commands relating to Volumes.""" VERSION = '2.0.0' def __init__(self, vg_obj=None, *args, **kwargs): super(LVMVolumeDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(volume_opts) self.configuration.append_config_values(vgw_opts)
from raksha.openstack.common.rpc import matchmaker as mm_common redis = importutils.try_import('redis') matchmaker_redis_opts = [ cfg.StrOpt('host', default='127.0.0.1', help='Host to locate redis'), cfg.IntOpt('port', default=6379, help='Use this port to connect to redis host.'), cfg.StrOpt('password', default=None, help='Password for Redis server. (optional)'), ] CONF = cfg.CONF opt_group = cfg.OptGroup(name='matchmaker_redis', title='Options for Redis-based MatchMaker') CONF.register_group(opt_group) CONF.register_opts(matchmaker_redis_opts, opt_group) LOG = logging.getLogger(__name__) class RedisExchange(mm_common.Exchange): def __init__(self, matchmaker): self.matchmaker = matchmaker self.redis = matchmaker.redis super(RedisExchange, self).__init__() class RedisTopicExchange(RedisExchange): """ Exchange where all topic keys are split, sending to second half.
from sqlalchemy.orm import exc as sqlalchemy_exceptions

from oslo.config import cfg

from designate.openstack.common import excutils
from designate.openstack.common import log as logging
from designate import exceptions
from designate.backend import base
from designate.backend.impl_powerdns import models
from designate.sqlalchemy.session import get_session
from designate.sqlalchemy.session import SQLOPTS
from designate.sqlalchemy.expressions import InsertFromSelect

LOG = logging.getLogger(__name__)

# TSIG algorithms this backend supports for signed zone transfers.
TSIG_SUPPORTED_ALGORITHMS = ['hmac-md5']

cfg.CONF.register_group(cfg.OptGroup(
    name='backend:powerdns', title="Configuration for Powerdns Backend"
))

# Backend-specific options plus the shared SQLAlchemy options (SQLOPTS),
# all registered under the 'backend:powerdns' group.
cfg.CONF.register_opts([
    cfg.StrOpt('domain-type', default='NATIVE', help='PowerDNS Domain Type'),
    cfg.ListOpt('also-notify', default=[], help='List of additional IPs to '
                                               'send NOTIFYs to'),
] + SQLOPTS, group='backend:powerdns')

# Override the default DB connection registered above, to avoid name conflicts
# between the Designate and PowerDNS databases.
cfg.CONF.set_default('database_connection',
                     'sqlite:///$state_path/powerdns.sqlite',
                     group='backend:powerdns')
from sqlalchemy import Unicode from sqlalchemy.orm import exc, relationship from sqlalchemy.ext.declarative import declarative_base from billingstack.collector import states from billingstack.collector.storage import Connection, StorageEngine from billingstack.openstack.common import log as logging from billingstack.sqlalchemy.types import JSON, UUID from billingstack.sqlalchemy import api, model_base, session, utils LOG = logging.getLogger(__name__) BASE = declarative_base(cls=model_base.ModelBase) cfg.CONF.register_group( cfg.OptGroup(name='collector:sqlalchemy', title='Config for collector sqlalchemy plugin')) cfg.CONF.register_opts(session.SQLOPTS, group='collector:sqlalchemy') class PGProvider(BASE, model_base.BaseMixin): """ A Payment Gateway - The thing that processes a Payment Method This is registered either by the Admin or by the PaymentGateway plugin """ __tablename__ = 'pg_provider' name = Column(Unicode(60), nullable=False) title = Column(Unicode(100)) description = Column(Unicode(255))
from sqlalchemy import Column from sqlalchemy import Unicode, Float, DateTime from sqlalchemy.ext.declarative import declarative_base from billingstack.openstack.common import log as logging from billingstack.rater.storage import Connection, StorageEngine from billingstack.sqlalchemy.types import UUID from billingstack.sqlalchemy import api, model_base, session # DB SCHEMA BASE = declarative_base(cls=model_base.ModelBase) LOG = logging.getLogger(__name__) cfg.CONF.register_group( cfg.OptGroup(name='rater:sqlalchemy', title='Config for rater sqlalchemy plugin')) cfg.CONF.register_opts(session.SQLOPTS, group='rater:sqlalchemy') class Usage(BASE, model_base.BaseMixin): """ A record of something that's used from for example a Metering system like Ceilometer """ measure = Column(Unicode(255)) start_timestamp = Column(DateTime) end_timestamp = Column(DateTime) price = Column(Float) total = Column(Float)
# distributed under the license is distributed on an "as is" basis, without # warranties or conditions of any kind, either express or implied. see the # license for the specific language governing permissions and limitations # under the license. import socket import ssl import logging from oslo.config import cfg from dnh.handler.base import BaseHandler LOG = logging.getLogger(__name__) CFG_GRP = 'handler:nsd4' cfg.CONF.register_group( cfg.OptGroup(name=CFG_GRP, title='Configuration for NSD 4 handler')) cfg.CONF.register_opts([ cfg.StrOpt('keyfile', default='/etc/nsd/nsd_control.key', required=True), cfg.StrOpt('certfile', default='/etc/nsd/nsd_control.pem', required=True), cfg.ListOpt('servers', required=True), cfg.StrOpt('pattern', required=True), ], group=CFG_GRP) DEFAULT_PORT = 8952 class NSD4Handler(BaseHandler): """ Handle domain create and delete events by sending commands to a NSD 4 server """
def setUp(self):
    """Configure a noop-cached pool manager and its pool sections.

    Registers the pool, pool-target and pool-nameserver config sections
    used by the tests, then starts the pool_manager service.
    """
    super(PoolManagerServiceNoopTest, self).setUp()

    self.config(threshold_percentage=100,
                enable_recovery_timer=False,
                enable_sync_timer=False,
                cache_driver='noop',
                group='service:pool_manager')

    # TODO(kiall): Rework all this pool config etc into a fixture..

    def register_section(section_name, section_opts):
        # Helper: register one dynamically-named config section and its opts.
        cfg.CONF.register_group(cfg.OptGroup(name=section_name))
        cfg.CONF.register_opts(section_opts, group=section_name)

    # Configure the Pool ID
    self.config(pool_id='794ccc2c-d751-44fe-b57f-8894c9f5c842',
                group='service:pool_manager')

    # Configure the Pool
    register_section('pool:794ccc2c-d751-44fe-b57f-8894c9f5c842', [
        cfg.ListOpt('targets', default=[
            'f278782a-07dc-4502-9177-b5d85c5f7c7e',
            'a38703f2-b71e-4e5b-ab22-30caaed61dfd',
        ]),
        cfg.ListOpt('nameservers', default=[
            'c5d64303-4cba-425a-9f3c-5d708584dde4',
            'c67cdc95-9a9e-4d2a-98ed-dc78cbd85234',
        ]),
        cfg.ListOpt('also_notifies', default=[]),
    ])

    # Configure the Pool Targets
    register_section('pool_target:f278782a-07dc-4502-9177-b5d85c5f7c7e', [
        cfg.StrOpt('type', default='fake'),
        cfg.ListOpt('masters', default=['127.0.0.1:5354']),
        cfg.DictOpt('options', default={}),
    ])
    register_section('pool_target:a38703f2-b71e-4e5b-ab22-30caaed61dfd', [
        cfg.StrOpt('type', default='fake'),
        cfg.ListOpt('masters', default=['127.0.0.1:5354']),
        cfg.DictOpt('options', default={}),
    ])

    # Configure the Pool Nameservers
    # BUG FIX: 'port' was declared as a StrOpt with an *integer* default;
    # oslo.config validates default types, so declare it as an IntOpt.
    register_section('pool_nameserver:c5d64303-4cba-425a-9f3c-5d708584dde4', [
        cfg.StrOpt('host', default='127.0.0.1'),
        cfg.IntOpt('port', default=5355),
    ])
    register_section('pool_nameserver:c67cdc95-9a9e-4d2a-98ed-dc78cbd85234', [
        cfg.StrOpt('host', default='127.0.0.1'),
        cfg.IntOpt('port', default=5356),
    ])

    # Start the Service
    self.service = self.start_service('pool_manager')
    self.cache = self.service.cache