def start_up():
    """Build the falcon WSGI application and launch the syslog server and
    celery worker processes.

    Returns the falcon API instance so a WSGI server can host it.
    """
    try:
        config.init_config()
    except config.cfg.ConfigFilesNotFoundError as ex:
        # Log the exception itself: BaseException has no ``message``
        # attribute on Python 3, so ``ex.message`` would raise here.
        _LOG.exception(ex)

    conf = config.get_config()

    application = api = falcon.API()
    api.add_route('/', VersionResource())

    # http correlation endpoint
    api.add_route('/v1/tenant/{tenant_id}/publish', PublishMessageResource())

    # syslog correlation endpoint
    server = SyslogServer(("0.0.0.0", 5140), syslog.MessageHandler(conf))
    server.start()
    syslog_server_proc = Process(target=start_io)
    syslog_server_proc.start()
    _LOG.info('Syslog server started as process: {}'.format(
        syslog_server_proc.pid))

    celery.conf.CELERYBEAT_SCHEDULE = {
        'worker_stats': {
            'task': 'stats.publish',
            'schedule': timedelta(
                seconds=publish_stats.WORKER_STATUS_INTERVAL)
        },
    }

    # include blank argument to celery in order for beat to start correctly
    celery_proc = Process(target=celery.worker_main, args=[['', '--beat']])
    celery_proc.start()
    _LOG.info('Celery started as process: {}'.format(celery_proc.pid))

    return application
def get_normalizer(conf=None):
    """Return a ``(normalizer, loaded_rules)`` tuple.

    :param conf: optional configuration object exposing ``liblognorm``;
        when omitted, the current global configuration is used.  Resolving
        the default lazily fixes the original defect where
        ``config.get_config()`` was evaluated once at import time, before
        the configuration may have been initialized.
    """
    if conf is None:
        conf = config.get_config()
    normalization_conf = conf.liblognorm
    normalizer = LogNormalizer()
    loaded_rules = list()
    if normalization_conf.rules_dir:
        loaded_rules = load_rules(normalizer, normalization_conf.rules_dir)
    return (normalizer, loaded_rules)
def get_handler():
    """Factory that builds, connects, and returns an ElasticsearchHandler."""
    handler = ElasticsearchHandler(config.get_config().elasticsearch)
    handler.connect()
    return handler
def test_loading(self):
    """The sample config file loads and exposes the registered test option."""
    init_config(['--config-file', '../etc/meniscus/meniscus.conf'])
    cfg_obj = get_config()
    cfg_obj.register_group(test_group)
    cfg_obj.register_opts(CFG_TEST_OPTIONS, group=test_group)
    self.assertTrue(cfg_obj.test.should_pass)
def get_handler():
    """Factory that returns a MongoDBHandler built from the global config.

    NOTE: connection errors are logged but not re-raised, so callers may
    receive a handler whose connect attempt failed.
    """
    handler = MongoDBHandler(config.get_config().mongodb)
    try:
        handler.connect()
    except MongoDBHandlerError as ex:
        _LOG.exception(ex)
    return handler
def test_loading(self):
    """Config loads from either relative location and exposes the
    registered test option."""
    try:
        init_config(['--config-file', '../etc/meniscus/meniscus.conf'])
    except Exception:
        # Fall back to the path used when tests run from the repo root.
        # Narrowed from a bare ``except:``, which would also trap
        # SystemExit and KeyboardInterrupt.
        init_config(['--config-file', './etc/meniscus/meniscus.conf'])
    conf = get_config()
    conf.register_group(test_group)
    conf.register_opts(CFG_TEST_OPTIONS, group=test_group)
    self.assertTrue(conf.test.should_pass)
def test_loading(self):
    """Config loads from either relative location and exposes the
    registered test option."""
    try:
        init_config(['--config-file', '../etc/meniscus/meniscus.conf'])
    except Exception:
        # Fall back to the path used when tests run from the repo root.
        # Removed the leftover debug print of os.getcwd(); narrowed from a
        # bare ``except:``, which also traps SystemExit/KeyboardInterrupt.
        init_config(['--config-file', './etc/meniscus/meniscus.conf'])
    conf = get_config()
    conf.register_group(test_group)
    conf.register_opts(CFG_TEST_OPTIONS, group=test_group)
    self.assertTrue(conf.test.should_pass)
def db_handler():
    """Return a connected datasource handler built from the global config."""
    try:
        config.init_config()
    except config.cfg.ConfigFilesNotFoundError:
        # TODO(dmend) Log config error
        pass
    handler = datasource_handler(config.get_config())
    handler.connect()
    return handler
def start_up():
    """Build the falcon WSGI application and launch the syslog server and
    celery worker processes.

    Returns the falcon API instance so a WSGI server can host it.
    """
    try:
        config.init_config()
    except config.cfg.ConfigFilesNotFoundError as ex:
        # Log the exception itself: BaseException has no ``message``
        # attribute on Python 3, so ``ex.message`` would raise here.
        _LOG.exception(ex)

    conf = config.get_config()

    application = api = falcon.API()
    api.add_route('/', VersionResource())

    # http correlation endpoint
    api.add_route('/v1/tenant/{tenant_id}/publish', PublishMessageResource())

    # syslog correlation endpoint
    server = SyslogServer(("0.0.0.0", 5140), syslog.MessageHandler(conf))
    server.start()
    syslog_server_proc = Process(target=start_io)
    syslog_server_proc.start()
    _LOG.info(
        'Syslog server started as process: {}'.format(syslog_server_proc.pid)
    )

    celery.conf.CELERYBEAT_SCHEDULE = {
        'worker_stats': {
            'task': 'stats.publish',
            'schedule': timedelta(
                seconds=publish_stats.WORKER_STATUS_INTERVAL)
        },
    }

    # NOTE(review): worker_main is launched without the '--beat' argument,
    # so the CELERYBEAT_SCHEDULE above only fires if beat runs elsewhere —
    # TODO confirm this is intentional.  (Removed the stale commented-out
    # '--beat' variant of this line.)
    celery_proc = Process(target=celery.worker_main)
    celery_proc.start()
    _LOG.info(
        'Celery started as process: {}'.format(celery_proc.pid)
    )

    return application
from celery import Celery from oslo.config import cfg import meniscus.config as config from meniscus import env _LOG = env.get_logger(__name__) # Celery configuration options _CELERY_GROUP = cfg.OptGroup(name='celery', title='Celery Options') config.get_config().register_group(_CELERY_GROUP) _CELERY = [ cfg.StrOpt('BROKER_URL', default="librabbitmq://guest@localhost//", help="""url to the broker behind celery""" ), cfg.IntOpt('CELERYD_CONCURRENCY', default=10, help="""Number of concurrent worker processes/threads""" ), cfg.BoolOpt('CELERY_DISABLE_RATE_LIMITS', default=True, help="""disable celery rate limit""" ), cfg.StrOpt('CELERY_TASK_SERIALIZER', default="json", help="""default serialization method to use""" )
from meniscus.data.datastore import handler from meniscus import env from meniscus.ext.plugin import import_module _LOG = env.get_logger(__name__) COORDINATOR_DB = 'coordinator_db' DEFAULT_SINK = 'default_sink' SHORT_TERM_STORE = 'short_term_store' #Register data handler options for coordinator db coordinator_db_group = cfg.OptGroup( name=COORDINATOR_DB, title='Coordinator DB Configuration Options') config.get_config().register_group(coordinator_db_group) coordinator_db_options = [ cfg.BoolOpt('active', default=True, help="""Determines whether the handler for this store is registered for use """ ), cfg.StrOpt('adapter_name', default='mongodb', help="""Sets the name of the handler to load for datasource interactions. e.g. mongodb """ ), cfg.ListOpt('servers',
from oslo.config import cfg

import meniscus.config as config
from meniscus import env
from meniscus.sinks import elasticsearch

_LOG = env.get_logger(__name__)

# Data sink configuration options
_DATA_SINKS_GROUP = cfg.OptGroup(name='data_sinks', title='Data Sink List')
config.get_config().register_group(_DATA_SINKS_GROUP)

_SINK = [
    cfg.ListOpt('valid_sinks',
                default=['elasticsearch'],
                help="""valid data sinks list"""),
    cfg.StrOpt('default_sink',
               default='elasticsearch',
               help="""default data sink""")
]

config.get_config().register_opts(_SINK, group=_DATA_SINKS_GROUP)

try:
    config.init_config()
except config.cfg.ConfigFilesNotFoundError as ex:
    # Log the exception itself: BaseException has no ``message`` attribute
    # on Python 3, so ``ex.message`` would raise AttributeError here.
    _LOG.exception(ex)

conf = config.get_config()

VALID_SINKS = conf.data_sinks.valid_sinks
DEFAULT_SINK = conf.data_sinks.default_sink
def setUp(self):
    """Load the local test config and connect a datasource handler."""
    init_config(['--config-file', 'meniscus.cfg'])
    self.handler = datasource_handler(get_config())
    self.handler.connect()
import os from meniscus import config from meniscus import env from pylognorm import LogNormalizer from oslo.config import cfg _LOG = env.get_logger(__name__) # Normalization configuration options _NORMALIZATION_GROUP = cfg.OptGroup(name='liblognorm', title='Liblognorm options') config.get_config().register_group(_NORMALIZATION_GROUP) _NORMALIZATION = [ cfg.StrOpt('rules_dir', default=None, help="""directory to load rules from""") ] config.get_config().register_opts(_NORMALIZATION, group=_NORMALIZATION_GROUP) try: config.init_config() except config.cfg.ConfigFilesNotFoundError as ex: _LOG.exception(ex.message) def get_normalizer(conf=config.get_config()): """This returns both a normalizer as well as a list of loaded rules"""
from pywebhdfs import webhdfs from meniscus import config from meniscus import env from meniscus.openstack.common import jsonutils from meniscus.queue import celery from meniscus.storage import transaction _LOG = env.get_logger(__name__) # HDFS configuration options _hdfs_group = cfg.OptGroup(name='hdfs_sink', title='HDFS Sink Options') config.get_config().register_group(_hdfs_group) _hdfs_options = [ cfg.StrOpt('host', default='localhost', help="""WebHDFS hostname""" ), cfg.StrOpt('port', default='5700', help="""WebHDFS port number.""" ), cfg.StrOpt('user_name', default='hdfs', help="""WebHDFS user_name.""" ), cfg.StrOpt('base_directory',
import os
import socket
import struct
import subprocess
import sys

from oslo.config import cfg

from meniscus import config
from meniscus import env

_LOG = env.get_logger(__name__)

# Default network interface configuration options
_network_interface_group = cfg.OptGroup(
    name='network_interface',
    title='Default network interface name')
config.get_config().register_group(_network_interface_group)

_network_interface_options = [
    cfg.StrOpt('default_ifname',
               default='eth0',
               help="""The default network interface to pull the IP from""")
]

config.get_config().register_opts(_network_interface_options,
                                  group=_network_interface_group)

try:
    config.init_config()
except config.cfg.ConfigFilesNotFoundError as ex:
    # Log the exception itself: BaseException has no ``message`` attribute
    # on Python 3, so ``ex.message`` would raise AttributeError here.
    _LOG.exception(ex)
from oslo.config import cfg from meniscus.config import get_config from meniscus.config import init_config from meniscus.data.model.tenant import ( load_tenant_from_dict, load_token_from_dict) from meniscus.data.model.worker import WorkerConfiguration from meniscus.openstack.common import jsonutils from meniscus.proxy import NativeProxy # cache configuration options _cache_group = cfg.OptGroup(name='cache', title='Cache Options') get_config().register_group(_cache_group) _CACHE_OPTIONS = [ cfg.IntOpt('default_expires', default=3600, help="""default time to keep items in cache""" ), cfg.IntOpt('config_expires', default=0, help="""Default time to keep worker config items in cache.""" ), cfg.StrOpt('cache_config', default='cache-config', help="""The name of the cache to store worker config values""" ), cfg.StrOpt('cache_tenant', default='cache-tenant', help="""The name of the cache to store worker config values"""
from multiprocessing import Process
from time import sleep

import requests
from oslo.config import cfg

from meniscus.api.utils.request import http_request
from meniscus.config import get_config
from meniscus.config import init_config
from meniscus.data.cache_handler import BroadcastCache

# broadcast configuration options
_BROADCAST_GROUP = cfg.OptGroup(name='broadcast_settings',
                                title='Broadcast Settings')
get_config().register_group(_BROADCAST_GROUP)

_BROADCAST_OPTIONS = [
    cfg.IntOpt('broadcast_message_interval',
               default=60,
               help="""default time to broadcast messages""")
]

get_config().register_opts(_BROADCAST_OPTIONS, group=_BROADCAST_GROUP)

try:
    init_config()
except cfg.ConfigFilesNotFoundError:
    # No config files found; the registered defaults remain in effect.
    pass

# Both branches of the original try/except assigned the same value, so the
# lookup is hoisted out of the exception handling.
conf = get_config()
import sys from oslo.config import cfg from meniscus import env from meniscus import config _LOG = env.get_logger(__name__) _network_interface_group = cfg.OptGroup( name='network_interface', title='Default network interface name' ) config.get_config().register_group(_network_interface_group) _network_interface_options = [ cfg.StrOpt('default_ifname', default='eth0', help="""The default network interface to pull the IP from""" ) ] config.get_config().register_opts( _network_interface_options, group=_network_interface_group ) try: config.init_config()
from oslo.config import cfg

import meniscus.config as config
from meniscus import env
from meniscus.sinks import elasticsearch

_LOG = env.get_logger(__name__)

# Data sink configuration options
_DATA_SINKS_GROUP = cfg.OptGroup(name='data_sinks', title='Data Sink List')
config.get_config().register_group(_DATA_SINKS_GROUP)

_SINK = [
    cfg.ListOpt('valid_sinks',
                default=['elasticsearch'],
                help="""valid data sinks list"""
                ),
    cfg.StrOpt('default_sink',
               default='elasticsearch',
               help="""default data sink"""
               )
]

config.get_config().register_opts(_SINK, group=_DATA_SINKS_GROUP)

try:
    config.init_config()
except config.cfg.ConfigFilesNotFoundError as ex:
    # Log the exception itself: BaseException has no ``message`` attribute
    # on Python 3, so ``ex.message`` would raise AttributeError here.
    _LOG.exception(ex)

conf = config.get_config()
from oslo.config import cfg

from meniscus.api.coordinator import coordinator_errors as error
from meniscus.config import get_config
from meniscus.config import init_config
from meniscus.data.model.worker import Worker
from meniscus import env

# coordinator configuration options
_COORDINATOR_GROUP = cfg.OptGroup(name='coordinator_settings',
                                  title='Coordinator Settings')
get_config().register_group(_COORDINATOR_GROUP)

_COORDINATOR_CONSTANTS = [
    # Help strings corrected: the originals were copy-pasted from the
    # watchlist threshold options and described unrelated settings.
    cfg.ListOpt('valid_route_list',
                default=['new', 'online', 'draining'],
                help="""worker statuses eligible for routing"""),
    cfg.ListOpt('valid_status_list',
                default=['new', 'offline', 'online', 'draining'],
                help="""valid worker statuses""")
]

get_config().register_opts(_COORDINATOR_CONSTANTS, group=_COORDINATOR_GROUP)

try:
    init_config()
except cfg.ConfigFilesNotFoundError:
    # No config files found; the registered defaults remain in effect.
    pass

# Both branches of the original try/except assigned the same value, so the
# lookup is hoisted out of the exception handling.
conf = get_config()
import sys import uuid from kombu import Connection, Exchange, Queue from kombu.pools import producers from elasticsearch import helpers as es_helpers import meniscus.config as config from meniscus import env from meniscus.data.handlers import elasticsearch from meniscus.queue import celery _LOG = env.get_logger(__name__) conf = config.get_config() broker_url = conf.celery.BROKER_URL es_handler = elasticsearch.get_handler() BULK_SIZE = es_handler.bulk_size TTL = es_handler.ttl ELASTICSEARCH_QUEUE = 'elasticsearch' try: # The broker where our exchange is. connection = Connection(broker_url) # The exchange we send our index requests to. es_exchange = Exchange( ELASTICSEARCH_QUEUE, exchange_type='direct', exchange_durable=True)
from elasticsearch import Elasticsearch, ElasticsearchException from oslo.config import cfg from meniscus.data.handlers import base from meniscus import config from meniscus import env _LOG = env.get_logger(__name__) #Register options for Elasticsearch elasticsearch_group = cfg.OptGroup( name="elasticsearch", title='Elasticsearch Configuration Options') config.get_config().register_group(elasticsearch_group) elasticsearch_options = [ cfg.ListOpt('servers', default=['localhost:9200'], help="""hostname:port for db servers """ ), cfg.IntOpt('bulk_size', default=100, help="""Amount of records to transmit in bulk """ ), cfg.StrOpt('ttl', default="30d", help="""default time to live for documents
from pymongo import MongoClient from oslo.config import cfg from meniscus.config import get_config from meniscus.data.handler import ( DatabaseHandlerError, DatasourceHandler, register_handler, STATUS_CONNECTED, STATUS_CLOSED ) # MongoDB configuration options _mongodb_group = cfg.OptGroup(name='mongodb', title='MongoDB Options') get_config().register_group(_mongodb_group) _MONGODB_OPTIONS = [ cfg.ListOpt('mongo_servers', default=['localhost:27017'], help="""MongoDB servers to connect to.""" ), cfg.StrOpt('database', default='test', help="""MongoDB database to use.""" ), cfg.StrOpt('username', default='', help="""MongoDB username to use when authenticating. If this value is left unset, then authentication against the MongoDB will not be utilized.""", secret=True ), cfg.StrOpt('password',
from celery import Celery from oslo.config import cfg import meniscus.config as config from meniscus import env _LOG = env.get_logger(__name__) # Celery configuration options _CELERY_GROUP = cfg.OptGroup(name='celery', title='Celery Options') config.get_config().register_group(_CELERY_GROUP) _CELERY = [ cfg.StrOpt('BROKER_URL', default="librabbitmq://guest@localhost//", help="""url to the broker behind celery"""), cfg.IntOpt('CELERYD_CONCURRENCY', default=10, help="""Number of concurrent worker processes/threads"""), cfg.BoolOpt('CELERY_DISABLE_RATE_LIMITS', default=True, help="""disable celery rate limit"""), cfg.StrOpt('CELERY_TASK_SERIALIZER', default="json", help="""default serialization method to use""") ] config.get_config().register_opts(_CELERY, group=_CELERY_GROUP) try:
from meniscus.config import get_config from meniscus.config import init_config from meniscus.data.cache_handler import ConfigCache from meniscus.data.model.worker import WorkerConfiguration from meniscus.ext.plugin import import_module from meniscus import env from meniscus.openstack.common import log log.setup('meniscus') _LOG = env.get_logger(__name__) # default configuration options _node_group = cfg.OptGroup(name='node', title='Node') get_config().register_group(_node_group) _NODE_OPTIONS = [ cfg.StrOpt('personality', default='worker', help="""The personality to load""" ), cfg.StrOpt('coordinator_uri', default='http://localhost:8080/v1', help="""The URI of the Coordinator (can be a load balancer)""" ) ] get_config().register_opts(_NODE_OPTIONS, group=_node_group) try: init_config()
from portal.input.usyslog import SyslogMessageHandler from meniscus.api.correlation import correlator import meniscus.api.correlation.correlation_exceptions as errors from meniscus import env from meniscus import config from oslo.config import cfg from meniscus.storage import dispatch from meniscus.normalization.normalizer import * # Syslog server options syslog_group = cfg.OptGroup( name='syslog_server', title='Syslog server options') config.get_config().register_group(syslog_group) config.get_config().register_opts( [cfg.IntOpt('max_messages_per_stream', default=-1, help="""Sets the number of messages to consume per stream until the the server should break the connection to force a reconenct and hopefully a loadbalanced node rotation for the client. """ )], group=syslog_group) _LOG = env.get_logger(__name__) class MessageHandler(SyslogMessageHandler):
import os

from meniscus import config
from meniscus import env
from pylognorm import LogNormalizer
from oslo.config import cfg

_LOG = env.get_logger(__name__)

# Normalization configuration options
_NORMALIZATION_GROUP = cfg.OptGroup(
    name='liblognorm',
    title='Liblognorm options')
config.get_config().register_group(_NORMALIZATION_GROUP)

_NORMALIZATION = [
    cfg.StrOpt('rules_dir',
               default=None,
               help="""directory to load rules from"""
               )
]

config.get_config().register_opts(
    _NORMALIZATION, group=_NORMALIZATION_GROUP)

try:
    config.init_config()
except config.cfg.ConfigFilesNotFoundError as ex:
    # Log the exception itself: BaseException has no ``message`` attribute
    # on Python 3, so ``ex.message`` would raise AttributeError here.
    _LOG.exception(ex)
import requests from oslo.config import cfg from meniscus.api.utils.request import http_request from meniscus.api.utils.retry import retry from meniscus.api.utils import sys_assist from meniscus.config import get_config from meniscus.config import init_config from meniscus.data.cache_handler import ConfigCache from meniscus.openstack.common import jsonutils # cache configuration options _STATUS_UPDATE_GROUP = cfg.OptGroup(name='status_update', title='Status Update Settings') get_config().register_group(_STATUS_UPDATE_GROUP) _CACHE_OPTIONS = [ cfg.IntOpt('load_ave_interval', default=60, help="""default time to update worker load average""" ), cfg.IntOpt('disk_usage_interval', default=300, help="""Default time to update work disk usage.""" ) ] get_config().register_opts(_CACHE_OPTIONS, group=_STATUS_UPDATE_GROUP) try: init_config()
from pymongo import MongoClient, errors from oslo.config import cfg from meniscus.data.handlers import base from meniscus import config from meniscus import env _LOG = env.get_logger(__name__) #Register options for MongoDB mongodb_group = cfg.OptGroup( name="mongodb", title='MongoDB Configuration Options') config.get_config().register_group(mongodb_group) mongodb_options = [ cfg.ListOpt('servers', default=['localhost:27017'], help="""hostname:port for db servers """ ), cfg.StrOpt('database', default='test', help="""database name """ ), cfg.StrOpt('username', default='test', help="""db username """ ), cfg.StrOpt('password',
from oslo.config import cfg

from meniscus.api.personalities import PERSONALITIES
from meniscus.config import get_config
from meniscus.config import init_config
from meniscus.data.model.worker import Worker
from meniscus.api.coordinator import coordinator_errors as error

# coordinator configuration options
_COORDINATOR_GROUP = cfg.OptGroup(name='coordinator_settings',
                                  title='Coordinator Settings')
get_config().register_group(_COORDINATOR_GROUP)

_COORDINATOR_CONSTANTS = [
    # Help strings corrected: the originals were copy-pasted from the
    # watchlist threshold options and described unrelated settings.
    cfg.ListOpt('valid_route_list',
                default=['online', 'draining'],
                help="""worker statuses eligible for routing"""),
    cfg.ListOpt('valid_status_list',
                default=['new', 'offline', 'online', 'draining'],
                help="""valid worker statuses""")
]

get_config().register_opts(_COORDINATOR_CONSTANTS, group=_COORDINATOR_GROUP)

try:
    init_config()
except cfg.ConfigFilesNotFoundError:
    # No config files found; the registered defaults remain in effect.
    pass

# Both branches of the original try/except assigned the same value, so the
# lookup is hoisted out of the exception handling.
conf = get_config()
from elasticsearch import Elasticsearch, ElasticsearchException from oslo.config import cfg from meniscus.data.handlers import base from meniscus import config from meniscus import env _LOG = env.get_logger(__name__) #Register options for Elasticsearch elasticsearch_group = cfg.OptGroup(name="elasticsearch", title='Elasticsearch Configuration Options') config.get_config().register_group(elasticsearch_group) elasticsearch_options = [ cfg.ListOpt('servers', default=['localhost:9200'], help="""hostname:port for db servers """), cfg.IntOpt('bulk_size', default=100, help="""Amount of records to transmit in bulk """), cfg.StrOpt('ttl', default="30d", help="""default time to live for documents inserted into the default store """) ] config.get_config().register_opts(elasticsearch_options,
from oslo.config import cfg from meniscus.config import get_config from meniscus.config import init_config from meniscus.data.cache_handler import ConfigCache from meniscus.data.model.worker import WorkerConfiguration from meniscus.ext.plugin import import_module from meniscus import env from meniscus.openstack.common import log log.setup('meniscus') _LOG = env.get_logger(__name__) # default configuration options _node_group = cfg.OptGroup(name='node', title='Node') get_config().register_group(_node_group) _NODE_OPTIONS = [ cfg.StrOpt('personality', default='worker', help="""The personality to load"""), cfg.StrOpt('coordinator_uri', default='http://localhost:8080/v1', help="""The URI of the Coordinator (can be a load balancer)""") ] get_config().register_opts(_NODE_OPTIONS, group=_node_group) try: init_config() conf = get_config() except cfg.ConfigFilesNotFoundError:
from oslo.config import cfg import meniscus from meniscus.validation import jsonv import meniscus.config as config from meniscus import env from meniscus.validation.integration.falcon_hooks import validation_hook _LOG = env.get_logger(__name__) # Celery configuration options _JSON_SCHEMA_GROUP = cfg.OptGroup( name='json_schema', title='Json Schema Options') config.get_config().register_group(_JSON_SCHEMA_GROUP) default_schema_path = '{0}etc/meniscus/schemas/'.format( os.path.dirname(meniscus.__file__).rstrip('meniscus')) _JSON_SCHEMA = [ cfg.StrOpt('schema_dir', default=default_schema_path, help="""directory holding json schema files""" ) ] config.get_config().register_opts(_JSON_SCHEMA, group=_JSON_SCHEMA_GROUP) try: config.init_config()
from oslo.config import cfg

from meniscus.config import get_config

# Handler configuration options
datasource_group = cfg.OptGroup(name='datasource',
                                title='Datasource Configuration Options')
get_config().register_group(datasource_group)

HANDLER_OPTIONS = [
    cfg.StrOpt('handler_name',
               default='memory',
               help="""Sets the name of the handler to load for
                    datasource interactions. e.g. mongodb
                    """),
    cfg.BoolOpt('verbose',
                default=False,
                help="""Sets whether or not the datasource handlers
                     should be verbose in their logging output.
                     """)
]

get_config().register_opts(HANDLER_OPTIONS, group=datasource_group)

# Handler registration
_DATASOURCE_HANDLERS = DatasourceHandlerManager()

# Datasource handler lifecycle states.
STATUS_NEW = 'NEW'
# Fixed typo in the status value ('CONNTECTED' -> 'CONNECTED').  In-repo
# comparisons go through this constant name, so the fix is safe unless the
# raw string was persisted externally — TODO confirm before release.
STATUS_CONNECTED = 'CONNECTED'
STATUS_CLOSED = 'CLOSED'
from oslo.config import cfg from meniscus.config import get_config from meniscus.config import init_config from meniscus.data.model.util import load_tenant_from_dict from meniscus.data.model.util import load_token_from_dict from meniscus.data.model.worker import WorkerConfiguration from meniscus.openstack.common import jsonutils from meniscus.proxy import NativeProxy # cache configuration options _cache_group = cfg.OptGroup(name='cache', title='Cache Options') get_config().register_group(_cache_group) _CACHE_OPTIONS = [ cfg.IntOpt('default_expires', default=900, help="""default time to keep items in cache""" ), cfg.IntOpt('config_expires', default=0, help="""Default time to keep worker config items in cache.""" ), cfg.StrOpt('cache_config', default='cache-config', help="""The name of the cache to store worker config values""" ), cfg.StrOpt('cache_tenant', default='cache-tenant', help="""The name of the cache to store worker config values"""
from oslo.config import cfg
import simplejson as json
import zmq

import meniscus.config as config
from meniscus import env

_LOG = env.get_logger(__name__)

# ZMQ configuration options
_ZMQ_GROUP = cfg.OptGroup(
    name='zmq_in',
    title='ZeroMQ Input Options')
config.get_config().register_group(_ZMQ_GROUP)

_ZMQ_OPTS = [
    cfg.ListOpt('zmq_upstream_hosts',
                default=['127.0.0.1:5000'],
                help='list of upstream host:port pairs to poll for '
                     'zmq messages')
]

config.get_config().register_opts(_ZMQ_OPTS, group=_ZMQ_GROUP)

try:
    config.init_config()
except config.cfg.ConfigFilesNotFoundError as ex:
    # Log the exception itself: BaseException has no ``message`` attribute
    # on Python 3, so ``ex.message`` would raise AttributeError here.
    _LOG.exception(ex)
from meniscus.api.utils.request import http_request from oslo.config import cfg from meniscus.api.coordinator import coordinator_flow from meniscus.api.coordinator import coordinator_errors from meniscus.api.personalities import PERSONALITIES from meniscus.config import get_config from meniscus.config import init_config from meniscus.data.model.worker import WatchlistItem from meniscus.data.model.worker import Worker from meniscus.openstack.common import jsonutils # cache configuration options _WATCHLIST_GROUP = cfg.OptGroup(name='watchlist_settings', title='Watchlist Settings') get_config().register_group(_WATCHLIST_GROUP) _WATCHLIST_THRESHOLDS = [ cfg.IntOpt('failure_tolerance_seconds', default=60, help="""default duration for monitoring failed workers"""), cfg.IntOpt('watchlist_count_threshold', default=5, help="""count of reported failures""") ] get_config().register_opts(_WATCHLIST_THRESHOLDS, group=_WATCHLIST_GROUP) try: init_config() conf = get_config() except cfg.ConfigFilesNotFoundError:
from oslo.config import cfg

from meniscus.api.utils.request import http_request
from meniscus.config import get_config
from meniscus.config import init_config
from meniscus.data.cache_handler import ConfigCache
from meniscus.openstack.common import jsonutils
from meniscus.queue import celery
from meniscus.data.model.worker import SystemInfo
from meniscus import env

_LOG = env.get_logger(__name__)

# status update configuration options
_STATUS_UPDATE_GROUP = cfg.OptGroup(name='status_update',
                                    title='Status Update Settings')
get_config().register_group(_STATUS_UPDATE_GROUP)

_CACHE_OPTIONS = [
    cfg.IntOpt('worker_status_interval',
               default=60,
               help="""default time to update the worker status""")
]

get_config().register_opts(_CACHE_OPTIONS, group=_STATUS_UPDATE_GROUP)

try:
    init_config()
except cfg.ConfigFilesNotFoundError:
    # No config files found; the registered defaults remain in effect.
    pass

# Both branches of the original try/except assigned the same value, so the
# lookup is hoisted out of the exception handling.
conf = get_config()

WORKER_STATUS_INTERVAL = conf.status_update.worker_status_interval