Example #1
def start_up():
    try:
        config.init_config()
    except config.cfg.ConfigFilesNotFoundError as ex:
        _LOG.exception(ex.message)

    conf = config.get_config()

    application = api = falcon.API()
    api.add_route('/', VersionResource())

    #http correlation endpoint
    api.add_route('/v1/tenant/{tenant_id}/publish', PublishMessageResource())

    #syslog correlation endpoint
    server = SyslogServer(("0.0.0.0", 5140), syslog.MessageHandler(conf))
    server.start()

    syslog_server_proc = Process(target=start_io)
    syslog_server_proc.start()
    _LOG.info('Syslog server started as process: {}'.format(
        syslog_server_proc.pid))

    celery.conf.CELERYBEAT_SCHEDULE = {
        'worker_stats': {
            'task': 'stats.publish',
            'schedule': timedelta(seconds=publish_stats.WORKER_STATUS_INTERVAL)
        },
    }

    #include blank argument to celery in order for beat to start correctly
    celery_proc = Process(target=celery.worker_main, args=[['', '--beat']])
    celery_proc.start()
    _LOG.info('Celery started as process: {}'.format(celery_proc.pid))
    return application
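
The start_up() snippet above is shown without its imports. A minimal sketch of what it appears to rely on follows; the meniscus-internal module path is an assumption for illustration, and the remaining names (VersionResource, PublishMessageResource, SyslogServer, start_io, syslog, celery, publish_stats, _LOG) come from other project modules not shown in this excerpt.

# Assumed imports for the start_up() example above. The standard-library and
# third-party names are certain; the meniscus module path is a hypothetical
# placeholder, not confirmed by the excerpt.
from datetime import timedelta
from multiprocessing import Process

import falcon

from meniscus import config  # assumed to expose init_config(), get_config() and cfg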
Example #2
    def test_loading(self):
        init_config(['--config-file', '../etc/meniscus/meniscus.conf'])

        conf = get_config()
        conf.register_group(test_group)
        conf.register_opts(CFG_TEST_OPTIONS, group=test_group)

        self.assertTrue(conf.test.should_pass)
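
Example #2 refers to a test_group and CFG_TEST_OPTIONS defined elsewhere in the test module. A plausible sketch of those definitions is shown below; the option type, title, and default are assumptions, but the group name has to be 'test' for conf.test.should_pass to resolve.

# Hypothetical definitions assumed by the test above (not part of the excerpt).
from oslo.config import cfg  # import path is an assumption

test_group = cfg.OptGroup(name='test', title='Test Options')
CFG_TEST_OPTIONS = [
    cfg.BoolOpt('should_pass',
                default=False,
                help="""flag read back from meniscus.conf to verify loading""")
]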
Example #3
    def test_loading(self):
        try:
            init_config(['--config-file', '../etc/meniscus/meniscus.conf'])
        except Exception:
            # fall back to a path relative to the project root
            init_config(['--config-file', './etc/meniscus/meniscus.conf'])

        conf = get_config()
        conf.register_group(test_group)
        conf.register_opts(CFG_TEST_OPTIONS, group=test_group)

        self.assertTrue(conf.test.should_pass)
Example #4
    def test_loading(self):
        try:
            init_config(['--config-file', '../etc/meniscus/meniscus.conf'])
        except Exception:
            # show the working directory, then fall back to a project-root-relative path
            print('cwd: {}'.format(os.getcwd()))
            init_config(['--config-file', './etc/meniscus/meniscus.conf'])

        conf = get_config()
        conf.register_group(test_group)
        conf.register_opts(CFG_TEST_OPTIONS, group=test_group)

        self.assertTrue(conf.test.should_pass)
Example #5
def db_handler():
    try:
        config.init_config()
    except config.cfg.ConfigFilesNotFoundError:
        #TODO(dmend) Log config error
        pass

    conf = config.get_config()
    _handler = datasource_handler(conf)
    _handler.connect()

    return _handler
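
db_handler() acts as a small factory: it loads configuration, builds a datasource handler, connects it, and hands it back. A hypothetical caller might look like the sketch below; the put() call mirrors the db.put() usage in Example #8 further down, and the collection name and payload are illustrative only.

# Hypothetical usage of db_handler(); collection name and payload are examples.
_db = db_handler()
_db.put('watchlist', {'worker_id': '1234'})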
Example #6
def start_up():
    try:
        config.init_config()
    except config.cfg.ConfigFilesNotFoundError as ex:
        _LOG.exception(ex.message)

    application = api = falcon.API()
    api.add_route('/', VersionResource())

    #http correlation endpoint
    api.add_route('/v1/tenant/{tenant_id}/publish', PublishMessageResource())

    #syslog correlation endpoint
    server = receiver.new_correlation_input_server()

    server_proc = Process(target=server.start)
    server_proc.start()

    _LOG.info(
        'ZeroMQ reception server started as process: {}'.format(
            server_proc.pid)
    )

    celery.conf.CELERYBEAT_SCHEDULE = {
        'worker_stats': {
            'task': 'stats.publish',
            'schedule': timedelta(seconds=publish_stats.WORKER_STATUS_INTERVAL)
        },
    }

    #include blank argument to celery in order for beat to start correctly
    celery_proc = Process(target=celery.worker_main, args=[['', '--beat']])
    celery_proc.start()
    _LOG.info(
        'Celery started as process: {}'.format(celery_proc.pid)
    )

    es_flusher = ElasticSearchStreamBulker()
    flush_proc = Process(target=es_flusher.start)
    flush_proc.start()
    _LOG.info(
        'ElasticSearchStreamBulker started as process: {}'.format(
            flush_proc.pid)
    )
    return application
Example #7
def start_up():
    try:
        config.init_config()
    except config.cfg.ConfigFilesNotFoundError as ex:
        _LOG.exception(ex.message)

    conf = config.get_config()

    application = api = falcon.API()
    api.add_route('/', VersionResource())

    #http correlation endpoint
    api.add_route('/v1/tenant/{tenant_id}/publish', PublishMessageResource())

    #syslog correlation endpoint
    server = SyslogServer(
        ("0.0.0.0", 5140), syslog.MessageHandler(conf))
    server.start()

    syslog_server_proc = Process(target=start_io)
    syslog_server_proc.start()
    _LOG.info(
        'Syslog server started as process: {}'.format(syslog_server_proc.pid)
    )

    celery.conf.CELERYBEAT_SCHEDULE = {
        'worker_stats': {
            'task': 'stats.publish',
            'schedule': timedelta(seconds=publish_stats.WORKER_STATUS_INTERVAL)
        },
    }

    #include blank argument to celery in order for beat to start correctly
    #celery_proc = Process(target=celery.worker_main, args=[['', '--beat']])
    celery_proc = Process(target=celery.worker_main)
    celery_proc.start()
    _LOG.info(
        'Celery started as process: {}'.format(celery_proc.pid)
    )
    return application
Example #8
_WATCHLIST_GROUP = cfg.OptGroup(name='watchlist_settings',
                                title='Watchlist Settings')
get_config().register_group(_WATCHLIST_GROUP)

_WATCHLIST_THRESHOLDS = [
    cfg.IntOpt('failure_tolerance_seconds',
               default=60,
               help="""default duration for monitoring failed workers"""),
    cfg.IntOpt('watchlist_count_threshold',
               default=5,
               help="""count of reported failures""")
]

get_config().register_opts(_WATCHLIST_THRESHOLDS, group=_WATCHLIST_GROUP)
try:
    init_config()
    conf = get_config()
except cfg.ConfigFilesNotFoundError:
    conf = get_config()

FAILURE_TOLERANCE_SECONDS = conf.watchlist_settings.failure_tolerance_seconds
WATCHLIST_COUNT_THRESHOLD = conf.watchlist_settings.watchlist_count_threshold


def _add_watchlist_item(db, worker_id):
    """
    adds item to watchlist
    """
    watch_item = WatchlistItem(worker_id)
    db.put('watchlist', watch_item.format())
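
Because _WATCHLIST_THRESHOLDS registers ordinary oslo.config options, both defaults can be overridden from the configuration file. A hypothetical meniscus.conf fragment for the group registered above might look like this (the values are illustrative only):

# Illustrative meniscus.conf override for the watchlist_settings group.
[watchlist_settings]
failure_tolerance_seconds = 120
watchlist_count_threshold = 10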
Example #9
               default='cache-config',
               help="""The name of the cache to store worker config values"""
               ),
    cfg.StrOpt('cache_tenant',
               default='cache-tenant',
               help="""The name of the cache to store worker config values"""
               ),
    cfg.StrOpt('cache_token',
               default='cache-token',
               help="""The name of the cache to store worker config values"""
               )
]

get_config().register_opts(_CACHE_OPTIONS, group=_cache_group)
try:
    init_config()
    conf = get_config()
except cfg.ConfigFilesNotFoundError:
    conf = get_config()

DEFAULT_EXPIRES = conf.cache.default_expires
CONFIG_EXPIRES = conf.cache.config_expires
CACHE_CONFIG = conf.cache.cache_config
CACHE_TENANT = conf.cache.cache_tenant
CACHE_TOKEN = conf.cache.cache_token


class Cache(object):
    def __init__(self):
        self.cache = NativeProxy()
Example #10
# Normalization configuration options
_NORMALIZATION_GROUP = cfg.OptGroup(name='liblognorm',
                                    title='Liblognorm options')
config.get_config().register_group(_NORMALIZATION_GROUP)

_NORMALIZATION = [
    cfg.StrOpt('rules_dir',
               default=None,
               help="""directory to load rules from""")
]

config.get_config().register_opts(_NORMALIZATION, group=_NORMALIZATION_GROUP)

try:
    config.init_config()
except config.cfg.ConfigFilesNotFoundError as ex:
    _LOG.exception(ex.message)


def get_normalizer(conf=config.get_config()):
    """This returns both a normalizer as well as a list of loaded rules"""
    normalization_conf = conf.liblognorm
    normalizer = LogNormalizer()
    loaded_rules = list()
    if normalization_conf.rules_dir:
        loaded_rules = load_rules(normalizer, normalization_conf.rules_dir)
    return (normalizer, loaded_rules)


def load_rules(normalizer, path):
Example #11
    def setUp(self):
        init_config(['--config-file', 'meniscus.cfg'])
        conf = get_config()
        self.handler = datasource_handler(conf)
        self.handler.connect()
Example #12
               help="""base HDFS directory to use"""
               ),
    cfg.IntOpt('transaction_expire',
               default=300,
               help="""length of time for a write to hdfs before expiring"""
               ),
    cfg.IntOpt('transfer_frequency',
               default=60,
               help="""frequency to write records to hdfs"""
               )
]

config.get_config().register_opts(_hdfs_options, group=_hdfs_group)

try:
    config.init_config()
except config.cfg.ConfigFilesNotFoundError as ex:
    _LOG.exception(ex.message)

conf = config.get_config()

TRANSACTION_EXPIRE = conf.hdfs_sink.transaction_expire
FREQUENCY = conf.hdfs_sink.transfer_frequency
SINK = 'hdfs'


class HdfsTransaction(transaction.BatchMessageTransaction):

    def _process_locked_records(self):

        write_dir = "{0}/{1}".format(