def __main__(args=None, requests_impl_map=None):
    """Entry point: poll the configured collectors and act on changes.

    When CONF.command is set, runs it (with OS_CONFIG_FILES in the
    environment) whenever collected data changed and commits each
    collector's cache; otherwise prints the collected config once.

    :param args: argv-style list; defaults to sys.argv (resolved at call
        time — binding sys.argv as a def-time default would freeze whatever
        it held at import time).
    :param requests_impl_map: optional map of requests implementations,
        passed through to collect_all (used by tests).
    :raises exc.InvalidArguments: if CONF.collectors names an unknown
        collector.
    """
    if args is None:
        args = sys.argv
    setup_conf()
    CONF(args=args[1:], prog="os-collect-config")
    log.setup("os-collect-config")
    unknown_collectors = set(CONF.collectors) - set(DEFAULT_COLLECTORS)
    if unknown_collectors:
        raise exc.InvalidArguments(
            'Unknown collectors %s. Valid collectors are: %s' % (
                list(unknown_collectors), DEFAULT_COLLECTORS))
    while True:
        (any_changed, content) = collect_all(
            cfg.CONF.collectors,
            store=bool(CONF.command),
            requests_impl_map=requests_impl_map)
        if not CONF.command:
            # No command configured: emit the collected config and stop.
            print(json.dumps(content, indent=1))
            break
        if any_changed:
            env = dict(os.environ)
            env["OS_CONFIG_FILES"] = ':'.join(content)
            logger.info("Executing %s" % CONF.command)
            subprocess.call(CONF.command, env=env, shell=True)
            # Commit caches only after the command ran.
            for collector in cfg.CONF.collectors:
                cache.commit(collector)
        else:
            logger.debug("No changes detected.")
        if CONF.one_time:
            break
        logger.info("Sleeping %.2f seconds.", CONF.polling_interval)
        time.sleep(CONF.polling_interval)
def test_log_config_append_disable_existing_loggers(self):
    """setup() must pass disable_existing_loggers=False to fileConfig."""
    append_path = self.log_config_append
    self.config(log_config_append=append_path)
    with mock.patch('logging.config.fileConfig') as fake_file_config:
        log.setup('test_log_config_append')
    fake_file_config.assert_called_once_with(
        append_path, disable_existing_loggers=False)
def main():
    """Parse CLI options, set up logging and run the ZeroMQ proxy."""
    CONF(sys.argv[1:], project='oslo')
    logging.setup("oslo")
    proxy = impl_zmq.ZmqProxy(CONF)
    # closing() guarantees the proxy is shut down even if wait() raises.
    with contextlib.closing(proxy) as reactor:
        reactor.consume_in_thread()
        reactor.wait()
def setUp(self):
    """Configure an AUDIT-level test logger.

    Fix: copy CONF.default_log_levels before appending — appending to the
    option's list directly mutates the shared default in place and leaks
    "nova-test=AUDIT" into every other test using the same CONF.
    """
    super(LogLevelTestCase, self).setUp()
    levels = list(CONF.default_log_levels)
    levels.append("nova-test=AUDIT")
    self.config(default_log_levels=levels, verbose=True)
    log.setup("testing")
    self.log = log.getLogger("nova-test")
def setUp(self):
    """Configure an AUDIT-level test logger.

    Fix: copy CONF.default_log_levels before appending — appending to the
    option's list directly mutates the shared default in place and leaks
    "nova-test=AUDIT" into every other test using the same CONF.
    """
    super(LogLevelTestCase, self).setUp()
    levels = list(CONF.default_log_levels)
    levels.append("nova-test=AUDIT")
    self.config(default_log_levels=levels, verbose=True)
    log.setup('testing')
    self.log = log.getLogger('nova-test')
def setUp(self):
    """Configure an AUDIT-level test logger via the config fixture.

    Fix: copy default_log_levels before appending — appending to the
    option's list directly mutates the shared default in place and leaks
    "nova-test=AUDIT" into other tests sharing the global CONF.
    """
    super(LogLevelTestCase, self).setUp()
    self.CONF = self.useFixture(config.Config()).conf
    levels = list(self.CONF.default_log_levels)
    levels.append("nova-test=AUDIT")
    self.config = self.useFixture(config.Config()).config
    self.config(default_log_levels=levels, verbose=True)
    log.setup('testing')
    self.log = log.getLogger('nova-test')
def setUp(self):
    """Configure an AUDIT-level test logger via the config fixture.

    Fix: copy default_log_levels before appending — appending to the
    option's list directly mutates the shared default in place and leaks
    "nova-test=AUDIT" into other tests sharing the global CONF.
    """
    super(LogLevelTestCase, self).setUp()
    self.CONF = self.useFixture(config.Config()).conf
    levels = list(self.CONF.default_log_levels)
    levels.append("nova-test=AUDIT")
    self.config = self.useFixture(config.Config()).config
    self.config(default_log_levels=levels, verbose=True)
    log.setup("testing")
    self.log = log.getLogger("nova-test")
def prepare_service(argv=None):
    """Install i18n, apply logging defaults and initialise the persister.

    :param argv: argv-style list; defaults to sys.argv.
    """
    if argv is None:
        argv = sys.argv
    gettextutils.install('openstack')
    gettextutils.enable_lazy()
    default_levels = cfg.CONF.default_log_levels
    cfg.set_defaults(log.log_opts, default_log_levels=default_levels)
    cfg.CONF(argv[1:], project='persister')
    log.setup('persister')
    LOG.info('Service has started!')
def prepare_service(argv=None):
    """Install i18n, apply logging defaults and initialise monasca-anomaly.

    :param argv: argv-style list; defaults to sys.argv.
    """
    if argv is None:
        argv = sys.argv
    gettextutils.install('openstack')
    gettextutils.enable_lazy()
    default_levels = cfg.CONF.default_log_levels
    cfg.set_defaults(log.log_opts, default_log_levels=default_levels)
    cfg.CONF(argv[1:], project='monasca-anomaly')
    log.setup('monasca-anomaly')
    LOG.info('Service has started!')
def main():
    """Load CONF.json_file and push its contents to the collect service."""
    log.setup('collect-client')
    CONF(sys.argv[1:],
         project='os-collect-config-client',
         version=version.version_info.version_string())
    transport = messaging.get_transport(cfg.CONF)
    collect_client = CollectClient(transport)
    with open(CONF.json_file) as handle:
        payload = json.load(handle)
    collect_client.apply_config(payload)
def main(argv=["--config-file", "/etc/monasca/anomaly-engine.yaml"]): log_levels = cfg.CONF.default_log_levels cfg.set_defaults(log.log_opts, default_log_levels=log_levels) cfg.CONF(["--config-file", "/etc/monasca/anomaly-engine.yaml"], project="monasca-anomaly") log.setup("monasca-anomaly") for instance in cfg.CONF.rde.instances: # get instance config instance_opts = [ cfg.StrOpt("kafka_group"), cfg.BoolOpt("normalized"), cfg.BoolOpt("ad3"), cfg.FloatOpt("anom_threshold"), cfg.FloatOpt("normal_threshold"), cfg.IntOpt("fault_ittr"), cfg.IntOpt("normal_ittr"), cfg.StrOpt("sample_name"), cfg.ListOpt("dimension_match"), cfg.ListOpt("sample_metrics"), ] instance_group = cfg.OptGroup(name=instance, title=instance) cfg.CONF.register_group(instance_group) cfg.CONF.register_opts(instance_opts, instance_group) # start and add to processors rde_anomaly_processor = multiprocessing.Process(target=RDEAnomalyProcessor(instance).run) processors.append(rde_anomaly_processor) # nupic_anomaly_processor = multiprocessing.Process(target=NupicAnomalyProcessor().run) # processors.append(nupic_anomaly_processor) # ks_anomaly_processor = multiprocessing.Process(target=KsAnomalyProcessor().run) # processors.append(ks_anomaly_processor) try: LOG.info("Starting processes") for process in processors: process.start() # The signal handlers must be added after the processes start otherwise they run on all processes signal.signal(signal.SIGCHLD, clean_exit) signal.signal(signal.SIGINT, clean_exit) signal.signal(signal.SIGTERM, clean_exit) while True: time.sleep(5) except Exception: LOG.exception("Error! Exiting.") for process in processors: process.terminate()
def test_error_notification(self):
    """An ERROR log with publish_errors set emits an error_notification."""
    self.stubs.Set(cfg.CONF, "notification_driver",
                   ["openstack.common.notifier.rabbit_notifier"])
    self.stubs.Set(cfg.CONF, "publish_errors", True)
    common_log = log.getLogger("common")
    log.setup(None)
    captured = []
    self.stubs.Set(rpc, "notify",
                   lambda context, topic, data: captured.append(data))
    common_log.error("foo")
    self.assertEqual(1, len(captured))
    notification = captured[0]
    self.assertEqual(notification["event_type"], "error_notification")
    self.assertEqual(notification["priority"], "ERROR")
    self.assertEqual(notification["payload"]["error"], "foo")
def test_error_notification(self):
    """An ERROR log with publish_errors set emits an error_notification."""
    self.stubs.Set(cfg.CONF, 'notification_driver',
                   ['openstack.common.notifier.rabbit_notifier'])
    self.stubs.Set(cfg.CONF, 'publish_errors', True)
    common_log = log.getLogger('common')
    log.setup(None)
    captured = []
    self.stubs.Set(rpc, 'notify',
                   lambda context, topic, data: captured.append(data))
    common_log.error('foo')
    self.assertEqual(1, len(captured))
    notification = captured[0]
    self.assertEqual(notification['event_type'], 'error_notification')
    self.assertEqual(notification['priority'], 'ERROR')
    self.assertEqual(notification['payload']['error'], 'foo')
def test_error_notification(self):
    """An ERROR log with publish_errors set reaches the notifier once."""
    self.config(publish_errors=True, use_stderr=False)
    captured = []
    self.stubs.Set(no_op_notifier, "notify",
                   lambda context, message: captured.append(message))
    test_log = log.getLogger("test_error_notification.common")
    log.setup("test_error_notification")
    test_log.error("foo")
    self.assertEqual(1, len(captured))
    notification = captured[0]
    self.assertEqual(notification["event_type"], "error_notification")
    self.assertEqual(notification["priority"], "ERROR")
    self.assertEqual(notification["payload"]["error"], "foo")
def test_error_notification(self):
    """An ERROR log with publish_errors set reaches the notifier once."""
    self.config(publish_errors=True, use_stderr=False)
    captured = []
    self.stubs.Set(no_op_notifier, 'notify',
                   lambda context, message: captured.append(message))
    test_log = log.getLogger('test_error_notification.common')
    log.setup('test_error_notification')
    test_log.error('foo')
    self.assertEqual(1, len(captured))
    notification = captured[0]
    self.assertEqual(notification['event_type'], 'error_notification')
    self.assertEqual(notification['priority'], 'ERROR')
    self.assertEqual(notification['payload']['error'], 'foo')
def __main__(args=None, requests_impl_map=None):
    """Collect configuration once and run CONF.command or print the result.

    Fixes: the Python 2 ``print`` statement is replaced with the print()
    function (a syntax error under Python 3, and inconsistent with the
    rest of the codebase), and the argv default is resolved at call time
    instead of binding sys.argv at def time.

    :param args: argv-style list; defaults to sys.argv.
    :param requests_impl_map: optional map of requests implementations,
        passed through to collect_all (used by tests).
    """
    if args is None:
        args = sys.argv
    setup_conf()
    CONF(args=args[1:], prog="os-collect-config")
    log.setup("os-collect-config")
    (any_changed, content) = collect_all(COLLECTORS,
                                         store=bool(CONF.command),
                                         requests_impl_map=requests_impl_map)
    if CONF.command:
        if any_changed:
            env = dict(os.environ)
            env["OS_CONFIG_FILES"] = ':'.join(content)
            logger.info("Executing %s" % CONF.command)
            subprocess.call(CONF.command, env=env, shell=True)
            # Commit each collector's cache only after the command ran.
            for collector in COLLECTORS:
                cache.commit(collector.name)
        else:
            logger.debug("No changes detected.")
    else:
        print(json.dumps(content, indent=1))
def test_log_config_append_ok(self):
    """setup() succeeds when log_config_append names a valid config file."""
    append_path = self.log_config_append
    self.config(log_config_append=append_path)
    log.setup('test_log_config_append')
def test_will_be_verbose_if_verbose_flag_set(self):
    """verbose=True should leave the logger at INFO level."""
    self.config(verbose=True)
    log.setup("test_is_verbose")
    effective = logging.getLogger("test_is_verbose").getEffectiveLevel()
    self.assertEqual(logging.INFO, effective)
def test_will_not_be_verbose_if_verbose_flag_not_set(self):
    """Without the verbose flag the effective level stays at INFO."""
    self.config(verbose=False)
    log.setup()
    effective = self.log.logger.getEffectiveLevel()
    self.assertEqual(logging.INFO, effective)
def test_excepthook_installed(self):
    """setup() must swap in a custom sys.excepthook."""
    log.setup("test_excepthook_installed")
    installed_hook = sys.excepthook
    self.assertTrue(installed_hook != sys.__excepthook__)
def test_will_not_be_verbose_if_verbose_flag_not_set(self):
    """verbose=False should leave the logger at WARNING level."""
    self.config(verbose=False)
    log.setup("test_is_not_verbose")
    effective = logging.getLogger("test_is_not_verbose").getEffectiveLevel()
    self.assertEqual(logging.WARNING, effective)
def setup_agent():
    """Parse CLI options, initialise logging and bind the module logger."""
    global logger
    CONF(sys.argv[1:],
         project='os-collect-config',
         version=version.version_info.version_string())
    # Logging options only exist after CONF has parsed the command line.
    log.setup('os-collect-config')
    logger = log.getLogger(__name__)
def test_will_be_verbose_if_verbose_flag_set(self):
    """verbose=True drops the effective level to DEBUG."""
    self.config(verbose=True)
    log.setup()
    effective = self.log.logger.getEffectiveLevel()
    self.assertEqual(logging.DEBUG, effective)
def test_log_config_ok(self):
    """setup() accepts a minimal log_config file without error."""
    config_path = self._create_tempfile("logging", self.minimal_config)
    self.config(log_config=config_path)
    log.setup("test_log_config")
cfg.StrOpt('database_name'), cfg.StrOpt('ip_address'), cfg.StrOpt('port'), cfg.StrOpt('user'), cfg.StrOpt('password') ] influxdb_group = cfg.OptGroup(name='influxdb', title='influxdb') cfg.CONF.register_group(influxdb_group) cfg.CONF.register_opts(influxdb_opts, influxdb_group) cfg.CONF(sys.argv[1:]) log_levels = (cfg.CONF.default_log_levels) cfg.set_defaults(log.log_opts, default_log_levels=log_levels) log.setup("monasca-persister") def main(): metric_persister = MetricPersister(cfg.CONF.kafka_metrics, cfg.CONF.influxdb) alarm_persister = AlarmPersister(cfg.CONF.kafka_alarm_history, cfg.CONF.influxdb) metric_persister.start() alarm_persister.start() LOG.info(''' _____
from oslo import messaging
from oslo.config import cfg
from common.utility import *
from openstack.common import log as logging
import logging as std_logging
import sys

LOG = logging.getLogger(__name__)

if __name__ == '__main__':
    # Parse CLI options and bring up oslo logging before any RPC traffic.
    cfg.CONF(sys.argv[1:], project='solum')
    logging.setup('solum')
    cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)
    # Build an RPC client bound to the test topic and fire a single cast.
    serializer = RequestContextSerializer(JsonPayloadSerializer())
    transport = messaging.get_transport(cfg.CONF, aliases=TRANSPORT_ALIASES)
    target = messaging.Target(topic='server-test')
    rpc_client = messaging.RPCClient(transport, target, serializer=serializer)
    rpc_client.cast({}, 'echo')
influxdb_opts = [cfg.StrOpt('database_name'), cfg.StrOpt('ip_address'), cfg.StrOpt('port'), cfg.StrOpt('user'), cfg.StrOpt('password')] influxdb_group = cfg.OptGroup(name='influxdb', title='influxdb') cfg.CONF.register_group(influxdb_group) cfg.CONF.register_opts(influxdb_opts, influxdb_group) cfg.CONF(sys.argv[1:]) log_levels = (cfg.CONF.default_log_levels) cfg.set_defaults(log.log_opts, default_log_levels=log_levels) log.setup("monasca-persister") def main(): metric_persister = MetricPersister(cfg.CONF.kafka_metrics, cfg.CONF.influxdb) alarm_persister = AlarmPersister(cfg.CONF.kafka_alarm_history, cfg.CONF.influxdb) metric_persister.start() alarm_persister.start() LOG.info(''' _____
def config_for_engine(argv):
    """Parse configuration, set up logging and return the engine.

    :param argv: argument list handed to config.parse_args.
    :returns: whatever get_engine() produces.
    """
    config.parse_args(argv)
    logging.setup('forest')
    return get_engine()
def test_log_config_ok(self):
    """setup() accepts a minimal log_config file without error."""
    config_path = self._create_tempfile('logging', self.minimal_config)
    self.config(log_config=config_path)
    log.setup('test_log_config')
def test_will_be_debug_if_debug_flag_set(self):
    """debug=True drops the effective level to DEBUG."""
    self.config(debug=True)
    log.setup("test_is_debug")
    effective = logging.getLogger("test_is_debug").getEffectiveLevel()
    self.assertEqual(logging.DEBUG, effective)
def __main__(args=sys.argv, requests_impl_map=None):
    """Daemon entry point: poll collectors, run CONF.command, re-exec.

    Loops collecting configuration; when anything changed (or --force is
    set) runs CONF.command, commits the collector caches, and re-execs
    itself so the new configuration takes effect. Without a command (or
    with --print-only) it prints the collected config as JSON and exits.

    :param args: argv-style list (NOTE(review): bound at def time via the
        sys.argv default — presumably intentional here; confirm).
    :param requests_impl_map: optional map of requests implementations,
        passed through to collect_all (used by tests).
    :raises exc.InvalidArguments: if CONF.collectors names an unknown
        collector.
    """
    # Re-exec on SIGHUP so an external HUP reloads configuration.
    signal.signal(signal.SIGHUP, reexec_self)
    setup_conf()
    CONF(args=args[1:], prog="os-collect-config",
         version=version.version_info.version_string())
    # This resets the logging infrastructure which prevents capturing log
    # output in tests cleanly, so should only be called if there isn't already
    # handlers defined i.e. not in unit tests
    if not log.getLogger(None).logger.handlers:
        log.setup("os-collect-config")
    if CONF.print_cachedir:
        print(CONF.cachedir)
        return
    unknown_collectors = set(CONF.collectors) - set(DEFAULT_COLLECTORS)
    if unknown_collectors:
        raise exc.InvalidArguments(
            "Unknown collectors %s. Valid collectors are: %s" % (
                list(unknown_collectors), DEFAULT_COLLECTORS))
    if CONF.force:
        # --force implies a single pass.
        CONF.set_override("one_time", True)
    # Hash the config files so we can detect edits made while we run.
    config_files = CONF.config_file
    config_hash = getfilehash(config_files)
    while True:
        store_and_run = bool(CONF.command and not CONF.print_only)
        (any_changed, content) = collect_all(
            cfg.CONF.collectors,
            store=store_and_run,
            requests_impl_map=requests_impl_map)
        if store_and_run:
            if any_changed or CONF.force:
                # ignore HUP now since we will reexec after commit anyway
                signal.signal(signal.SIGHUP, signal.SIG_IGN)
                try:
                    call_command(content, CONF.command)
                except subprocess.CalledProcessError as e:
                    # Command failed: do not commit caches; retry after a
                    # sleep unless the config file itself changed.
                    logger.error(
                        "Command failed, will not cache new data. %s" % e)
                    if not CONF.one_time:
                        new_config_hash = getfilehash(config_files)
                        if config_hash == new_config_hash:
                            logger.warn(
                                "Sleeping %.2f seconds before re-exec." %
                                CONF.polling_interval)
                            time.sleep(CONF.polling_interval)
                        else:
                            # The command failed but the config file has
                            # changed re-exec now as the config file change
                            # may have fixed things.
                            logger.warn("Config changed, re-execing now")
                            config_hash = new_config_hash
                else:
                    # Success: persist each collector's cache, then re-exec
                    # so the process restarts with the new configuration.
                    for collector in cfg.CONF.collectors:
                        cache.commit(collector)
                    if not CONF.one_time:
                        reexec_self()
            else:
                logger.debug("No changes detected.")
            if CONF.one_time:
                break
            else:
                logger.info("Sleeping %.2f seconds.", CONF.polling_interval)
                time.sleep(CONF.polling_interval)
        else:
            # Print-only mode: emit the collected config and stop.
            print(json.dumps(content, indent=1))
            break