Example #1
    def test_logging_config(self):
        # Needs os, tempfile, oslo_config's cfg and the project's
        # dict_config module (imports elided in this excerpt).
        logging_opts = [
            cfg.StrOpt('level', default='INFO'),
            cfg.StrOpt('file', default='/var/log/monasca/monasca.log'),
            # StrOpt defaults should be strings (the original passed ints).
            cfg.StrOpt('size', default='10485760'),
            cfg.StrOpt('backup', default='5'),
            cfg.StrOpt('kazoo', default='WARN'),
            cfg.StrOpt('kafka', default='WARN'),
            cfg.StrOpt('iso8601', default='WARN'),
            cfg.StrOpt('statsd', default='WARN')
        ]
        logging_group = cfg.OptGroup(name='logging', title='logging')
        cfg.CONF.register_group(logging_group)
        cfg.CONF.register_opts(logging_opts, logging_group)

        # mkstemp() also returns an open file descriptor; close it so the
        # handle is not leaked.
        fd, tempfile_path = tempfile.mkstemp()
        os.close(fd)
        try:
            # Override two of the registered defaults from a config file.
            with open(tempfile_path, 'w') as outfile:
                outfile.writelines(
                    ['[logging]\n', 'level = DEBUG\n', 'backup = 3\n'])

            cfg.CONF(args=[],
                     project='test',
                     default_config_files=[tempfile_path])
            log_config = dict_config.get_config(cfg.CONF)
        finally:
            os.remove(tempfile_path)
        self.assertEqual(log_config['handlers']['file']['backupCount'], '3')
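The assertion pins down part of the dict that dict_config.get_config returns, and Example #2 below feeds that same dict straight to the standard library's logging.config.dictConfig. A minimal sketch of the layout implied by the registered options; the handler class and every key except handlers → file → backupCount are assumptions, not confirmed by the test:

# Hypothetical shape of dict_config.get_config(cfg.CONF); only the
# handlers/file/backupCount path is confirmed by the assertion above.
log_config = {
    'version': 1,  # required by logging.config.dictConfig
    'handlers': {
        'file': {
            'class': 'logging.handlers.RotatingFileHandler',  # assumed
            'filename': cfg.CONF.logging.file,
            'maxBytes': int(cfg.CONF.logging.size),
            'backupCount': cfg.CONF.logging.backup,  # '3': StrOpt values are strings
        },
    },
    'root': {'level': cfg.CONF.logging.level, 'handlers': ['file']},
}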
Example #2
import logging.config
import multiprocessing
import signal
import sys
import time

from oslo_config import cfg

# EventProcessor, PipelineProcessor, opts, dict_config and clean_exit come
# from the project's own modules; their imports are elided in this excerpt.

log = logging.getLogger(__name__)
processors = []  # module level so the clean_exit signal handler can see it


def main(argv=None):
    if argv is None:
        argv = sys.argv
    if len(argv) == 2:
        config_file = argv[1]
    elif len(argv) > 2:
        print("Usage: " + argv[0] + " <config_file>")
        print(
            "Config file defaults to /etc/monasca/monasca_events_engine.conf")
        return 1
    else:
        config_file = '/etc/monasca/monasca_events_engine.conf'

    # init oslo config
    cfg.CONF(args=[],
             project='monasca_events_engine',
             default_config_files=[config_file])

    # register oslo config opts
    opts.register_logging_opts(cfg.CONF)
    opts.register_mysql_opts(cfg.CONF)
    opts.register_kafka_opts(cfg.CONF)
    opts.register_winchester_opts(cfg.CONF)
    opts.register_event_processor_opts(cfg.CONF)
    opts.register_pipeline_processor_opts(cfg.CONF)
    opts.register_zookeeper_opts(cfg.CONF)

    # Set up Python logging from the generated dictConfig dictionary
    logging.config.dictConfig(dict_config.get_config(cfg.CONF))

    # Create the EventProcessor child processes (range() here; the
    # original used Python 2's xrange)
    num_event_processors = cfg.CONF.event_processor.number
    log.info('num_event_processors %d', num_event_processors)
    for _ in range(num_event_processors):
        event_processor = multiprocessing.Process(
            target=EventProcessor(cfg.CONF).run
        )
        processors.append(event_processor)

    # Create the PipelineProcessor child processes
    num_pipeline_processors = cfg.CONF.pipeline_processor.number
    log.info('num_pipeline_processors %d', num_pipeline_processors)
    for _ in range(num_pipeline_processors):
        pipeline_processor = multiprocessing.Process(
            target=PipelineProcessor(cfg.CONF).run
        )
        processors.append(pipeline_processor)

    # Start
    try:
        log.info('Starting processes')
        for process in processors:
            process.start()

        # The signal handlers must be installed after the processes start;
        # otherwise they would be inherited by, and run in, every child
        # process
        signal.signal(signal.SIGCHLD, clean_exit)
        signal.signal(signal.SIGINT, clean_exit)
        signal.signal(signal.SIGTERM, clean_exit)

        while True:
            time.sleep(5)

    except Exception:
        log.exception('Error! Exiting.')
        for process in processors:
            process.terminate()
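
clean_exit is registered above as the handler for SIGCHLD, SIGINT and SIGTERM but is not defined in this excerpt. A minimal sketch of such a handler, assuming it only needs to stop the children and exit; the project's real implementation may do more:

def clean_exit(signum, frame=None):
    # Hypothetical handler, not the project's code. The (signum, frame)
    # signature is what signal.signal() expects for its callback.
    log.info('Received signal %d, shutting down', signum)
    for process in processors:
        process.terminate()
    sys.exit(0)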