def test_good_load_local(self):
    """simport.load resolves classes and functions from a local module."""
    bound_method = simport.load(PWD + "|localmodule:Foo.method_a")
    import localmodule
    self.assertEqual(bound_method, localmodule.Foo.method_a)
    self.assertEqual(localmodule.function_a,
                     simport.load("localmodule:function_a"))
def launch(conf):
    """Build the falcon application and register the top-level routes."""
    config.parse_args()
    app = falcon.API(request_type=request.Request)

    # NOTE(dszumski): Falcon 2.0.0 switches the default for this from True
    # to False so we explicitly set it here to prevent the behaviour
    # changing between versions.
    app.req_options.strip_url_path_trailing_slash = True

    versions = simport.load(cfg.CONF.dispatcher.versions)()
    app.add_route("/", versions)
    app.add_route("/{version_id}", versions)

    # Workaround for a regression in falcon 0.3 which causes the path
    # '/v2.0' to not route to the versions resource.
    version_2_0 = simport.load(cfg.CONF.dispatcher.version_2_0)()
    app.add_route("/v2.0", version_2_0)

    healthchecks = simport.load(cfg.CONF.dispatcher.healthchecks)()
    app.add_route("/healthcheck", healthchecks)

    if cfg.CONF.enable_metrics_api:
        launch_metrics_api(app)
    if cfg.CONF.enable_logs_api:
        launch_log_api(app)

    LOG.debug('Dispatcher drivers have been added to the routes!')
    return app
def test_good_load_internal(self):
    """simport.load finds names defined inside the current module."""
    loaded_func = simport.load("test_simport:dummy_function")
    self.assertEqual(six.get_function_code(dummy_function),
                     six.get_function_code(loaded_func))
    loaded_method = simport.load("test_simport:DummyClass.method_a")
    self.assertEqual(six.get_function_code(DummyClass.method_a),
                     six.get_function_code(loaded_method))
def main():
    """Start the persister.

    Parses configuration, loads the metric and alarm-state-history
    repository drivers, forks one consumer process per configured
    processor for each Kafka topic, then supervises them until a signal
    or error triggers clean_exit.
    """
    # FIX: the original placed the """Start persister.""" string after
    # several statements, where it is a no-op expression rather than a
    # docstring; it is now the function docstring.
    log.register_options(cfg.CONF)
    log.set_defaults()
    cfg.CONF(sys.argv[1:], project='monasca', prog='persister')
    log.setup(cfg.CONF, "monasca-persister")

    metric_repository = simport.load(cfg.CONF.repositories.metrics_driver)
    alarm_state_history_repository = simport.load(
        cfg.CONF.repositories.alarm_state_history_driver)

    # Add processors for metrics topic
    for proc in range(0, cfg.CONF.kafka_metrics.num_processors):
        processors.append(
            multiprocessing.Process(target=start_process,
                                    args=(metric_repository,
                                          cfg.CONF.kafka_metrics)))

    # Add processors for alarm history topic
    for proc in range(0, cfg.CONF.kafka_alarm_history.num_processors):
        processors.append(
            multiprocessing.Process(target=start_process,
                                    args=(alarm_state_history_repository,
                                          cfg.CONF.kafka_alarm_history)))

    # Start
    try:
        # NOTE(review): ASCII-art startup banner; its original internal
        # line breaks were lost in this copy of the source.
        LOG.info(''' _____ / \ ____ ____ _____ ______ ____ _____ / \ / \ / _ \ / \\\__ \ / ___// ___\\\__ \\ / Y ( <_> ) | \/ __ \_\___ \\ \___ / __ \\_ \____|__ /\____/|___| (____ /____ >\___ >____ / \/ \/ \/ \/ \/ \/ __________ .__ __ \______ \ ___________ _____|__| _______/ |_ ___________ | ___// __ \_ __ \/ ___/ |/ ___/\ __\/ __ \_ __ \\ | | \ ___/| | \/\___ \| |\___ \ | | \ ___/| | \/ |____| \___ >__| /____ >__/____ > |__| \___ >__| \/ \/ \/ \/ ''')
        for process in processors:
            process.start()

        # The signal handlers must be added after the processes start
        # otherwise they run on all processes
        signal.signal(signal.SIGCHLD, clean_exit)
        signal.signal(signal.SIGINT, clean_exit)
        signal.signal(signal.SIGTERM, clean_exit)

        while True:
            time.sleep(10)
    except Exception:
        LOG.exception('Error! Exiting.')
        clean_exit(signal.SIGKILL)
def __init__(self):
    """Create message queues for events and alarm state transitions."""
    super(Alarming, self).__init__()
    driver = cfg.CONF.messaging.driver
    self.events_message_queue = simport.load(driver)(
        cfg.CONF.kafka.events_topic)
    self.alarm_state_transitions_message_queue = simport.load(driver)(
        cfg.CONF.kafka.alarm_state_transitions_topic)
def __init__(self):
    """Create message queues for the fixed event/alarm-transition topics."""
    super(Alarming, self).__init__()
    driver = cfg.CONF.messaging.driver
    self.events_message_queue = simport.load(driver)('events')
    self.alarm_state_transitions_message_queue = simport.load(driver)(
        'alarm-state-transitions')
def __init__(self):
    """Load notification repositories and validation settings."""
    super(Notifications, self).__init__()
    self._region = cfg.CONF.region
    repos = cfg.CONF.repositories
    self._notifications_repo = simport.load(repos.notifications_driver)()
    self._notification_method_type_repo = simport.load(
        repos.notification_method_type_driver)()
    self.valid_periods = cfg.CONF.valid_notification_periods
def __init__(self):
    """Create message queues for events and alarm state transitions."""
    super(Alarming, self).__init__()
    driver_path = cfg.CONF.messaging.driver
    self.events_message_queue = simport.load(driver_path)(
        cfg.CONF.kafka.events_topic)
    self.alarm_state_transitions_message_queue = simport.load(driver_path)(
        cfg.CONF.kafka.alarm_state_transitions_topic)
def main():
    """Start the persister.

    Parses configuration, loads the metric and alarm-state-history
    repository drivers, forks one consumer process per configured
    processor for each Kafka topic, then supervises them until a signal
    or error triggers clean_exit.
    """
    # FIX: the original placed the """Start persister.""" string after
    # several statements, where it is a no-op expression rather than a
    # docstring; it is now the function docstring.
    log.register_options(cfg.CONF)
    log.set_defaults()
    cfg.CONF(sys.argv[1:], project='monasca', prog='persister')
    log.setup(cfg.CONF, "monasca-persister")

    metric_repository = simport.load(cfg.CONF.repositories.metrics_driver)
    alarm_state_history_repository = simport.load(
        cfg.CONF.repositories.alarm_state_history_driver)

    # Add processors for metrics topic
    for proc in range(0, cfg.CONF.kafka_metrics.num_processors):
        processors.append(multiprocessing.Process(
            target=start_process,
            args=(metric_repository, cfg.CONF.kafka_metrics)))

    # Add processors for alarm history topic
    for proc in range(0, cfg.CONF.kafka_alarm_history.num_processors):
        processors.append(multiprocessing.Process(
            target=start_process,
            args=(alarm_state_history_repository,
                  cfg.CONF.kafka_alarm_history)))

    # Start
    try:
        # NOTE(review): ASCII-art startup banner; its original internal
        # line breaks were lost in this copy of the source.
        LOG.info(''' _____ / \ ____ ____ _____ ______ ____ _____ / \ / \ / _ \ / \\\__ \ / ___// ___\\\__ \\ / Y ( <_> ) | \/ __ \_\___ \\ \___ / __ \\_ \____|__ /\____/|___| (____ /____ >\___ >____ / \/ \/ \/ \/ \/ \/ __________ .__ __ \______ \ ___________ _____|__| _______/ |_ ___________ | ___// __ \_ __ \/ ___/ |/ ___/\ __\/ __ \_ __ \\ | | \ ___/| | \/\___ \| |\___ \ | | \ ___/| | \/ |____| \___ >__| /____ >__/____ > |__| \___ >__| \/ \/ \/ \/ ''')
        for process in processors:
            process.start()

        # The signal handlers must be added after the processes start
        # otherwise they run on all processes
        signal.signal(signal.SIGCHLD, clean_exit)
        signal.signal(signal.SIGINT, clean_exit)
        signal.signal(signal.SIGTERM, clean_exit)

        while True:
            time.sleep(10)
    except Exception:
        LOG.exception('Error! Exiting.')
        clean_exit(signal.SIGKILL)
def __init__(self):
    """Load the alarm and metric repositories; wrap failures."""
    try:
        super(AlarmsStateHistory, self).__init__()
        self._region = cfg.CONF.region
        repos = cfg.CONF.repositories
        self._alarms_repo = simport.load(repos.alarms_driver)()
        self._metrics_repo = simport.load(repos.metrics_driver)()
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def __init__(self):
    """Configure roles, repositories and valid periods for notifications."""
    super(Notifications, self).__init__()
    self._region = cfg.CONF.region
    security = cfg.CONF.security
    self._default_authorized_roles = security.default_authorized_roles
    self._get_notifications_authorized_roles = (
        security.default_authorized_roles +
        security.read_only_authorized_roles)
    self._notifications_repo = simport.load(
        cfg.CONF.repositories.notifications_driver)()
    self._notification_method_type_repo = simport.load(
        cfg.CONF.repositories.notification_method_type_driver)()
    self.valid_periods = cfg.CONF.valid_notification_periods
def __init__(self):
    """Load the metrics queue and repository; report 500 on failure."""
    try:
        super(Metrics, self).__init__()
        self._region = cfg.CONF.region
        self._message_queue = simport.load(cfg.CONF.messaging.driver)(
            'metrics')
        self._metrics_repo = simport.load(
            cfg.CONF.repositories.metrics_driver)()
    except Exception as ex:
        LOG.exception(ex)
        raise falcon.HTTPInternalServerError('Service unavailable', str(ex))
def __init__(self):
    """Set up read roles and repositories for alarm state history."""
    try:
        super(AlarmsStateHistory, self).__init__()
        self._region = cfg.CONF.region
        security = cfg.CONF.security
        self._get_alarms_authorized_roles = (
            security.default_authorized_roles +
            security.read_only_authorized_roles)
        repos = cfg.CONF.repositories
        self._alarms_repo = simport.load(repos.alarms_driver)()
        self._metrics_repo = simport.load(repos.metrics_driver)()
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def __init__(self):
    """Set up read roles and repositories for alarm state history."""
    try:
        super(AlarmsStateHistory, self).__init__()
        self._region = cfg.CONF.region
        self._get_alarms_authorized_roles = (
            cfg.CONF.security.default_authorized_roles +
            cfg.CONF.security.read_only_authorized_roles)
        repositories = cfg.CONF.repositories
        self._alarms_repo = simport.load(repositories.alarms_driver)()
        self._metrics_repo = simport.load(repositories.metrics_driver)()
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def __init__(self):
    """Configure roles, repositories and valid periods for notifications."""
    super(Notifications, self).__init__()
    self._region = cfg.CONF.region
    security = cfg.CONF.security
    self._default_authorized_roles = security.default_authorized_roles
    self._get_notifications_authorized_roles = (
        security.default_authorized_roles +
        security.read_only_authorized_roles)
    repos = cfg.CONF.repositories
    self._notifications_repo = simport.load(repos.notifications_driver)()
    self._notification_method_type_repo = simport.load(
        repos.notification_method_type_driver)()
    self.valid_periods = cfg.CONF.valid_notification_periods
def __init__(self):
    """Resolve the metrics repository driver; log and re-raise on failure."""
    try:
        self._metrics_repo = simport.load(CONF.repositories.metrics_driver)
    except Exception as ex:
        LOG.exception(ex)
        raise
def load_plugins(config):
    """Instantiate each configured notifier plugin, skipping broken ones."""
    for plugin_class in config.get("plugins", []):
        try:
            notifier = simport.load(plugin_class)(log)
            possible_notifiers.append(notifier)
        except Exception:
            log.exception("unable to load the class {0} , ignoring it".format(
                plugin_class))
def prepare_processes(conf, repo_driver):
    """Queue up num_processors worker processes for the given repository."""
    if conf.num_processors > 0:
        repository = simport.load(repo_driver)
        for _ in range(conf.num_processors):
            worker = multiprocessing.Process(target=start_process,
                                             args=(repository, conf))
            processors.append(worker)
def test_import_class(self):
    """simport.load imports a class from an external path."""
    loaded = simport.load(
        PWD + "/external|monasca_common.tests.external.externalmodule:Blah")
    import external.externalmodule
    self.assertEqual(loaded, external.externalmodule.Blah)
def reset_kafka_offsets():
    """delete all offsets from the offset specification."""
    app_name = PreHourlyProcessor.get_app_name()
    # Resolve the offsets repository and purge everything for this app.
    offset_specs = simport.load(cfg.CONF.repositories.offsets)()
    offset_specs.delete_all_kafka_offsets(app_name)
def __init__(self):
    """Resolve the metrics repository driver; log and re-raise on failure."""
    try:
        self._metrics_repo = simport.load(
            CONF.repositories.metrics_driver)
    except Exception as ex:
        LOG.exception(ex)
        raise
def prepare_processes(conf, repo_driver):
    """Queue worker processes, or warn when none are configured."""
    if conf.num_processors <= 0:
        # Guard clause: nothing to prepare.
        LOG.warning("Number of processors (num_processors) is {}".format(
            conf.num_processors))
        return
    repository = simport.load(repo_driver)
    for _ in range(conf.num_processors):
        processors.append(multiprocessing.Process(
            target=start_process, args=(repository, conf)))
def __init__(self):
    """Load the alarm-definitions repository; wrap failures."""
    try:
        super(AlarmDefinitions, self).__init__()
        self._region = cfg.CONF.region
        self._alarm_definitions_repo = simport.load(
            cfg.CONF.repositories.alarm_definitions_driver)()
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def prepare_processes(conf, repo_driver):
    """Queue worker processes, or warn when none are configured."""
    if conf.num_processors <= 0:
        # Guard clause: nothing to prepare.
        LOG.warning("Number of processors (num_processors) is {}".format(
            conf.num_processors))
        return
    repository = simport.load(repo_driver)
    for _ in range(conf.num_processors):
        processors.append(
            multiprocessing.Process(target=start_process,
                                    args=(repository, conf)))
def test_good_load_external(self):
    """Loading from an external dir reuses the already-imported module."""
    loaded = simport.load("tests/external|"
                          "external.externalmodule:Blah.method_b")
    self.assertTrue("external.externalmodule" in sys.modules)
    cached = sys.modules["external.externalmodule"]
    import external.externalmodule
    self.assertEqual(external.externalmodule,
                     sys.modules["external.externalmodule"])
    self.assertEqual(cached, external.externalmodule)
    self.assertEqual(loaded, external.externalmodule.Blah.method_b)
def __init__(self):
    """Initialise the Metrics resource: roles, message queue, repository.

    Raises falcon.HTTPInternalServerError when a driver fails to load.
    """
    try:
        super(Metrics, self).__init__()
        self._region = cfg.CONF.region
        self._default_authorized_roles = (
            cfg.CONF.security.default_authorized_roles)
        self._delegate_authorized_roles = (
            cfg.CONF.security.delegate_authorized_roles)
        self._post_metrics_authorized_roles = (
            cfg.CONF.security.default_authorized_roles +
            cfg.CONF.security.agent_authorized_roles)
        self._message_queue = simport.load(cfg.CONF.messaging.driver)(
            'metrics')
        self._metrics_repo = simport.load(
            cfg.CONF.repositories.metrics_driver)()
    except Exception as ex:
        LOG.exception(ex)
        # FIX: BaseException.message was removed in Python 3; build the
        # error description with str(ex) instead of ex.message.
        raise falcon.HTTPInternalServerError('Service unavailable',
                                             str(ex))
def test_good_load_external(self):
    """Loading from an external dir reuses the already-imported module."""
    loaded = simport.load(
        PWD + "/external|external.externalmodule:Blah.method_b")
    self.assertTrue('external.externalmodule' in sys.modules)
    cached = sys.modules['external.externalmodule']
    import external.externalmodule
    self.assertEqual(external.externalmodule,
                     sys.modules['external.externalmodule'])
    self.assertEqual(cached, external.externalmodule)
    self.assertEqual(loaded, external.externalmodule.Blah.method_b)
def test_good_load_external(self):
    """Loading from an external dir reuses the already-imported module."""
    loaded = simport.load("tests/external|"
                          "external.externalmodule:Blah.method_b")
    self.assertTrue('external.externalmodule' in sys.modules)
    cached = sys.modules['external.externalmodule']
    import external.externalmodule
    self.assertEqual(external.externalmodule,
                     sys.modules['external.externalmodule'])
    self.assertEqual(cached, external.externalmodule)
    self.assertEqual(loaded, external.externalmodule.Blah.method_b)
def __init__(self):
    """Initialise the Metrics resource: roles, message queue, repository.

    Raises falcon.HTTPInternalServerError when a driver fails to load.
    """
    try:
        super(Metrics, self).__init__()
        self._region = cfg.CONF.region
        self._delegate_authorized_roles = (
            cfg.CONF.security.delegate_authorized_roles)
        self._get_metrics_authorized_roles = (
            cfg.CONF.security.default_authorized_roles +
            cfg.CONF.security.read_only_authorized_roles)
        self._post_metrics_authorized_roles = (
            cfg.CONF.security.default_authorized_roles +
            cfg.CONF.security.agent_authorized_roles)
        self._message_queue = simport.load(cfg.CONF.messaging.driver)(
            'metrics')
        self._metrics_repo = simport.load(
            cfg.CONF.repositories.metrics_driver)()
    except Exception as ex:
        LOG.exception(ex)
        # FIX: BaseException.message was removed in Python 3; build the
        # error description with str(ex) instead of ex.message.
        raise falcon.HTTPInternalServerError('Service unavailable',
                                             str(ex))
def __init__(self):
    """Load the alarms repository for count queries; wrap failures."""
    try:
        super(AlarmsCount, self).__init__()
        self._region = cfg.CONF.region
        self._default_authorized_roles = (
            cfg.CONF.security.default_authorized_roles)
        self._alarms_repo = simport.load(
            cfg.CONF.repositories.alarms_driver)()
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def save_kafka_offsets(current_offsets, app_name, batch_time_info):
    """save current offsets to offset specification."""
    offset_specs = simport.load(cfg.CONF.repositories.offsets)()
    for o in current_offsets:
        MonMetricsKafkaProcessor.log_debug(
            "saving: OffSetRanges: %s %s %s %s, "
            "batch_time_info: %s" %
            (o.topic, o.partition, o.fromOffset, o.untilOffset,
             str(batch_time_info)))
    # add new offsets, update revision
    offset_specs.add_all_offsets(app_name, current_offsets, batch_time_info)
def __init__(self):
    """Load the metrics repository for dimension-name queries.

    Raises falcon.HTTPInternalServerError when the driver fails to load.
    """
    try:
        super(DimensionNames, self).__init__()
        self._region = cfg.CONF.region
        self._get_metrics_authorized_roles = (
            cfg.CONF.security.default_authorized_roles +
            cfg.CONF.security.read_only_authorized_roles)
        self._metrics_repo = simport.load(
            cfg.CONF.repositories.metrics_driver)()
    except Exception as ex:
        LOG.exception(ex)
        # FIX: BaseException.message was removed in Python 3; build the
        # error description with str(ex) instead of ex.message.
        raise falcon.HTTPInternalServerError('Service unavailable',
                                             str(ex))
def __init__(self):
    """Initialise the Metrics resource: roles, queue, repo, statsd counter.

    Raises falcon.HTTPInternalServerError when a driver fails to load.
    """
    try:
        super(Metrics, self).__init__()
        self._region = cfg.CONF.region
        self._delegate_authorized_roles = (
            cfg.CONF.security.delegate_authorized_roles)
        self._get_metrics_authorized_roles = (
            cfg.CONF.security.default_authorized_roles +
            cfg.CONF.security.read_only_authorized_roles)
        self._post_metrics_authorized_roles = (
            cfg.CONF.security.default_authorized_roles +
            cfg.CONF.security.agent_authorized_roles)
        self._message_queue = simport.load(cfg.CONF.messaging.driver)(
            'metrics')
        self._metrics_repo = simport.load(
            cfg.CONF.repositories.metrics_driver)()
    except Exception as ex:
        LOG.exception(ex)
        # FIX: BaseException.message was removed in Python 3; build the
        # error description with str(ex) instead of ex.message.
        raise falcon.HTTPInternalServerError('Service unavailable',
                                             str(ex))
    self._statsd_rejected_count = STATSD_CLIENT.get_counter(
        METRICS_REJECTED_COUNT)
def get_kafka_stream(topic, streaming_context):
    """Create a direct Kafka DStream, resuming from saved offsets if any."""
    offset_specifications = simport.load(cfg.CONF.repositories.offsets)()
    app_name = streaming_context.sparkContext.appName
    saved_offset_spec = offset_specifications.get_kafka_offsets(app_name)

    if len(saved_offset_spec) < 1:
        # No checkpoint: let Kafka pick the starting position.
        MonMetricsKafkaProcessor.log_debug(
            "No saved offsets available..."
            "connecting to kafka without specifying offsets")
        return KafkaUtils.createDirectStream(
            streaming_context, [topic],
            {"metadata.broker.list": cfg.CONF.messaging.brokers})

    # Rebuild the per-partition start offsets for this app/topic.
    from_offsets = {}
    for key, value in saved_offset_spec.items():
        if key.startswith("%s_%s" % (app_name, topic)):
            spec_topic = value.get_topic()
            spec_partition = int(value.get_partition())
            spec_until_offset = value.get_until_offset()
            tp = TopicAndPartition(spec_topic, spec_partition)
            from_offsets[tp] = int(spec_until_offset)

    MonMetricsKafkaProcessor.log_debug(
        "get_kafka_stream: calling createDirectStream :"
        " topic:{%s} : start " % topic)
    for key, value in from_offsets.items():
        MonMetricsKafkaProcessor.log_debug(
            "get_kafka_stream: calling createDirectStream : "
            "offsets : TopicAndPartition:{%s,%s}, value:{%s}" %
            (str(key._topic), str(key._partition), str(value)))
    MonMetricsKafkaProcessor.log_debug(
        "get_kafka_stream: calling createDirectStream : "
        "topic:{%s} : done" % topic)

    return KafkaUtils.createDirectStream(
        streaming_context, [topic],
        {"metadata.broker.list": cfg.CONF.messaging.brokers},
        from_offsets)
def get_kafka_stream(topic, streaming_context):
    """Create a direct Kafka DStream, resuming from saved offsets if any.

    Looks up offsets persisted for this Spark application; when none
    exist, connects without explicit offsets and lets Kafka choose the
    starting position.
    """
    offset_specifications = simport.load(cfg.CONF.repositories.offsets)()
    app_name = streaming_context.sparkContext.appName
    saved_offset_spec = offset_specifications.get_kafka_offsets(app_name)

    if len(saved_offset_spec) < 1:
        MonMetricsKafkaProcessor.log_debug(
            "No saved offsets available..."
            "connecting to kafka without specifying offsets")
        kvs = KafkaUtils.createDirectStream(
            streaming_context, [topic],
            {"metadata.broker.list": cfg.CONF.messaging.brokers})
        return kvs
    else:
        # Rebuild the per-partition start offsets for this app/topic.
        from_offsets = {}
        for key, value in saved_offset_spec.items():
            if key.startswith("%s_%s" % (app_name, topic)):
                spec_topic = value.get_topic()
                spec_partition = int(value.get_partition())
                spec_until_offset = value.get_until_offset()
                # FIX: the long() builtin does not exist in Python 3;
                # int() handles arbitrary precision on both Py2 and Py3.
                from_offsets[
                    TopicAndPartition(spec_topic, spec_partition)
                ] = int(spec_until_offset)

        MonMetricsKafkaProcessor.log_debug(
            "get_kafka_stream: calling createDirectStream :"
            " topic:{%s} : start " % topic)
        for key, value in from_offsets.items():
            MonMetricsKafkaProcessor.log_debug(
                "get_kafka_stream: calling createDirectStream : "
                "offsets : TopicAndPartition:{%s,%s}, value:{%s}" %
                (str(key._topic), str(key._partition), str(value)))
        MonMetricsKafkaProcessor.log_debug(
            "get_kafka_stream: calling createDirectStream : "
            "topic:{%s} : done" % topic)

        kvs = KafkaUtils.createDirectStream(
            streaming_context, [topic],
            {"metadata.broker.list": cfg.CONF.messaging.brokers},
            from_offsets)
        return kvs
def __init__(self):
    """Load the metrics repository for dimension-value queries."""
    try:
        super(DimensionValues, self).__init__()
        self._region = cfg.CONF.region
        security = cfg.CONF.security
        self._delegate_authorized_roles = security.delegate_authorized_roles
        self._get_metrics_authorized_roles = (
            security.default_authorized_roles +
            security.read_only_authorized_roles)
        self._metrics_repo = simport.load(
            cfg.CONF.repositories.metrics_driver)()
    except Exception as ex:
        LOG.exception(ex)
        raise falcon.HTTPInternalServerError('Service unavailable', str(ex))
def save_kafka_offsets(current_offsets, app_name, batch_time_info):
    """save current offsets to offset specification."""
    offset_specs = simport.load(cfg.CONF.repositories.offsets)()
    for offset_range in current_offsets:
        MonMetricsKafkaProcessor.log_debug(
            "saving: OffSetRanges: %s %s %s %s, "
            "batch_time_info: %s" % (
                offset_range.topic, offset_range.partition,
                offset_range.fromOffset, offset_range.untilOffset,
                str(batch_time_info)))
    # add new offsets, update revision
    offset_specs.add_all_offsets(app_name, current_offsets, batch_time_info)
def test_good_load_external(self):
    """External load registers the module under its full package name."""
    loaded = simport.load(
        PWD +
        "/external|monasca_common.tests.external.externalmodule:Blah.method_b"
    )
    self.assertTrue(
        'monasca_common.tests.external.externalmodule' in sys.modules)
    cached = sys.modules['monasca_common.tests.external.externalmodule']
    import external.externalmodule
    self.assertEqual(
        external.externalmodule,
        sys.modules['monasca_common.tests.external.externalmodule'])
    self.assertEqual(cached, external.externalmodule)
    self.assertEqual(loaded, external.externalmodule.Blah.method_b)
def __init__(self):
    """Set up roles and the metrics repository for measurement queries."""
    try:
        super(MetricsMeasurements, self).__init__()
        self._region = cfg.CONF.region
        security = cfg.CONF.security
        self._delegate_authorized_roles = security.delegate_authorized_roles
        self._get_metrics_authorized_roles = (
            security.default_authorized_roles +
            security.read_only_authorized_roles)
        self._post_metrics_authorized_roles = (
            security.default_authorized_roles +
            security.agent_authorized_roles)
        self._metrics_repo = simport.load(
            cfg.CONF.repositories.metrics_driver)()
    except Exception as ex:
        LOG.exception(ex)
        raise falcon.HTTPInternalServerError('Service unavailable', str(ex))
def test_local_class(self):
    """simport.load resolves a class defined in the calling module."""
    loaded = simport.load("LocalClass", __name__)
    self.assertEqual(loaded, LocalClass)
def get_data_provider():
    """Lazily instantiate and cache the pre-hourly data provider."""
    if not PreHourlyProcessorUtil.data_provider:
        provider_cls = simport.load(
            cfg.CONF.pre_hourly_processor.data_provider)
        PreHourlyProcessorUtil.data_provider = provider_cls()
    return PreHourlyProcessorUtil.data_provider
def launch_metrics_api(app):
    """Wire the v2.0 metrics/alarm/notification resources onto *app*."""
    dispatcher = cfg.CONF.dispatcher

    metrics = simport.load(dispatcher.metrics)()
    app.add_route("/v2.0/metrics", metrics)

    metrics_measurements = simport.load(dispatcher.metrics_measurements)()
    app.add_route("/v2.0/metrics/measurements", metrics_measurements)

    metrics_statistics = simport.load(dispatcher.metrics_statistics)()
    app.add_route("/v2.0/metrics/statistics", metrics_statistics)

    metrics_names = simport.load(dispatcher.metrics_names)()
    app.add_route("/v2.0/metrics/names", metrics_names)

    alarm_definitions = simport.load(dispatcher.alarm_definitions)()
    app.add_route("/v2.0/alarm-definitions/", alarm_definitions)
    app.add_route("/v2.0/alarm-definitions/{alarm_definition_id}",
                  alarm_definitions)

    alarms = simport.load(dispatcher.alarms)()
    app.add_route("/v2.0/alarms", alarms)
    app.add_route("/v2.0/alarms/{alarm_id}", alarms)

    alarm_count = simport.load(dispatcher.alarms_count)()
    app.add_route("/v2.0/alarms/count/", alarm_count)

    alarms_state_history = simport.load(dispatcher.alarms_state_history)()
    app.add_route("/v2.0/alarms/state-history", alarms_state_history)
    app.add_route("/v2.0/alarms/{alarm_id}/state-history",
                  alarms_state_history)

    notification_methods = simport.load(dispatcher.notification_methods)()
    app.add_route("/v2.0/notification-methods", notification_methods)
    app.add_route("/v2.0/notification-methods/{notification_method_id}",
                  notification_methods)

    dimension_values = simport.load(dispatcher.dimension_values)()
    app.add_route("/v2.0/metrics/dimensions/names/values", dimension_values)

    dimension_names = simport.load(dispatcher.dimension_names)()
    app.add_route("/v2.0/metrics/dimensions/names", dimension_names)

    notification_method_types = simport.load(
        dispatcher.notification_method_types)()
    app.add_route("/v2.0/notification-methods/types",
                  notification_method_types)
def __init__(self):
    """Load the notification-method-type repository."""
    super(NotificationsType, self).__init__()
    self._notification_method_type_repo = simport.load(
        cfg.CONF.repositories.notification_method_type_driver)()
def launch(conf):
    """Parse config, build the falcon application and register all routes."""
    # use default, but try to access one passed from conf first
    config_file = conf.get('config_file', "/etc/monasca/api-config.conf")

    log.register_options(cfg.CONF)
    log.set_defaults()
    cfg.CONF(args=[], project='monasca_api',
             default_config_files=[config_file])
    log.setup(cfg.CONF, 'monasca_api')

    app = falcon.API(request_type=request.Request)

    versions = simport.load(cfg.CONF.dispatcher.versions)()
    app.add_route("/", versions)
    app.add_route("/{version_id}", versions)

    # Workaround for a regression in falcon 0.3 which causes the path
    # '/v2.0' to not route to the versions resource.
    version_2_0 = simport.load(cfg.CONF.dispatcher.version_2_0)()
    app.add_route("/v2.0", version_2_0)

    metrics = simport.load(cfg.CONF.dispatcher.metrics)()
    app.add_route("/v2.0/metrics", metrics)

    metrics_measurements = simport.load(
        cfg.CONF.dispatcher.metrics_measurements)()
    app.add_route("/v2.0/metrics/measurements", metrics_measurements)

    metrics_statistics = simport.load(
        cfg.CONF.dispatcher.metrics_statistics)()
    app.add_route("/v2.0/metrics/statistics", metrics_statistics)

    metrics_names = simport.load(cfg.CONF.dispatcher.metrics_names)()
    app.add_route("/v2.0/metrics/names", metrics_names)

    alarm_definitions = simport.load(
        cfg.CONF.dispatcher.alarm_definitions)()
    app.add_route("/v2.0/alarm-definitions/", alarm_definitions)
    app.add_route("/v2.0/alarm-definitions/{alarm_definition_id}",
                  alarm_definitions)

    alarms = simport.load(cfg.CONF.dispatcher.alarms)()
    app.add_route("/v2.0/alarms", alarms)
    app.add_route("/v2.0/alarms/{alarm_id}", alarms)

    alarm_count = simport.load(cfg.CONF.dispatcher.alarms_count)()
    app.add_route("/v2.0/alarms/count/", alarm_count)

    alarms_state_history = simport.load(
        cfg.CONF.dispatcher.alarms_state_history)()
    app.add_route("/v2.0/alarms/state-history", alarms_state_history)
    app.add_route("/v2.0/alarms/{alarm_id}/state-history",
                  alarms_state_history)

    notification_methods = simport.load(
        cfg.CONF.dispatcher.notification_methods)()
    app.add_route("/v2.0/notification-methods", notification_methods)
    app.add_route("/v2.0/notification-methods/{notification_method_id}",
                  notification_methods)

    dimension_values = simport.load(cfg.CONF.dispatcher.dimension_values)()
    app.add_route("/v2.0/metrics/dimensions/names/values", dimension_values)

    dimension_names = simport.load(cfg.CONF.dispatcher.dimension_names)()
    app.add_route("/v2.0/metrics/dimensions/names", dimension_names)

    notification_method_types = simport.load(
        cfg.CONF.dispatcher.notification_method_types)()
    app.add_route("/v2.0/notification-methods/types",
                  notification_method_types)

    healthchecks = simport.load(cfg.CONF.dispatcher.healthchecks)()
    app.add_route("/healthcheck", healthchecks)

    LOG.debug('Dispatcher drivers have been added to the routes!')
    return app
def get_db_repo(config):
    """Return the configured repository instance, defaulting to MySQL."""
    if 'database' in config and 'repo_driver' in config['database']:
        driver = config['database']['repo_driver']
    else:
        driver = ('monasca_notification.common.repositories.'
                  'mysql.mysql_repo:MysqlRepo')
    return simport.load(driver)(config)
def load_healthcheck_resource(app):
    """Register the healthcheck endpoint on *app*."""
    healthchecks = simport.load(CONF.dispatcher.healthchecks)()
    app.add_route(uri_map.HEALTHCHECK_URI, healthchecks)
def reset_kafka_offsets(app_name):
    """delete all offsets from the offset specification."""
    # Resolve the offsets repository and purge everything for this app.
    offset_specs = simport.load(cfg.CONF.repositories.offsets)()
    offset_specs.delete_all_kafka_offsets(app_name)
def load_logs_resource(app):
    """Register the v2 and v3 log endpoints on *app*."""
    logs = simport.load(CONF.dispatcher.logs)()
    app.add_route(uri_map.V2_LOGS_URI, logs)
    logs_v3 = simport.load(CONF.dispatcher.logs_v3)()
    app.add_route(uri_map.V3_LOGS_URI, logs_v3)
def test_import_class(self):
    """simport.load imports a class via an explicit directory path."""
    loaded = simport.load(PWD + "/external|external.externalmodule:Blah")
    import external.externalmodule
    self.assertEqual(loaded, external.externalmodule.Blah)
def launch(conf):
    """Build the falcon application and register all v2.0 routes."""
    config.parse_args()
    app = falcon.API(request_type=request.Request)

    versions = simport.load(cfg.CONF.dispatcher.versions)()
    app.add_route("/", versions)
    app.add_route("/{version_id}", versions)

    # Workaround for a regression in falcon 0.3 which causes the path
    # '/v2.0' to not route to the versions resource.
    version_2_0 = simport.load(cfg.CONF.dispatcher.version_2_0)()
    app.add_route("/v2.0", version_2_0)

    metrics = simport.load(cfg.CONF.dispatcher.metrics)()
    app.add_route("/v2.0/metrics", metrics)

    metrics_measurements = simport.load(
        cfg.CONF.dispatcher.metrics_measurements)()
    app.add_route("/v2.0/metrics/measurements", metrics_measurements)

    metrics_statistics = simport.load(
        cfg.CONF.dispatcher.metrics_statistics)()
    app.add_route("/v2.0/metrics/statistics", metrics_statistics)

    metrics_names = simport.load(cfg.CONF.dispatcher.metrics_names)()
    app.add_route("/v2.0/metrics/names", metrics_names)

    alarm_definitions = simport.load(
        cfg.CONF.dispatcher.alarm_definitions)()
    app.add_route("/v2.0/alarm-definitions/", alarm_definitions)
    app.add_route("/v2.0/alarm-definitions/{alarm_definition_id}",
                  alarm_definitions)

    alarms = simport.load(cfg.CONF.dispatcher.alarms)()
    app.add_route("/v2.0/alarms", alarms)
    app.add_route("/v2.0/alarms/{alarm_id}", alarms)

    alarm_count = simport.load(cfg.CONF.dispatcher.alarms_count)()
    app.add_route("/v2.0/alarms/count/", alarm_count)

    alarms_state_history = simport.load(
        cfg.CONF.dispatcher.alarms_state_history)()
    app.add_route("/v2.0/alarms/state-history", alarms_state_history)
    app.add_route("/v2.0/alarms/{alarm_id}/state-history",
                  alarms_state_history)

    notification_methods = simport.load(
        cfg.CONF.dispatcher.notification_methods)()
    app.add_route("/v2.0/notification-methods", notification_methods)
    app.add_route("/v2.0/notification-methods/{notification_method_id}",
                  notification_methods)

    dimension_values = simport.load(cfg.CONF.dispatcher.dimension_values)()
    app.add_route("/v2.0/metrics/dimensions/names/values", dimension_values)

    dimension_names = simport.load(cfg.CONF.dispatcher.dimension_names)()
    app.add_route("/v2.0/metrics/dimensions/names", dimension_names)

    notification_method_types = simport.load(
        cfg.CONF.dispatcher.notification_method_types)()
    app.add_route("/v2.0/notification-methods/types",
                  notification_method_types)

    healthchecks = simport.load(cfg.CONF.dispatcher.healthchecks)()
    app.add_route("/healthcheck", healthchecks)

    LOG.debug('Dispatcher drivers have been added to the routes!')
    return app
def launch_log_api(app):
    """Register the v2.0 logs endpoint on *app*."""
    logs = simport.load(cfg.CONF.dispatcher.logs)()
    app.add_route("/v2.0/logs", logs)
def test_good_load_internal(self):
    """simport.load finds names defined inside the current module."""
    loaded_func = simport.load("test_simport:dummy_function")
    self.assertEqual(six.get_function_code(dummy_function),
                     six.get_function_code(loaded_func))
    loaded_method = simport.load("test_simport:DummyClass.method_a")
    self.assertEqual(six.get_function_code(DummyClass.method_a),
                     six.get_function_code(loaded_method))
def load_versions_resource(app):
    """Register the version listing and per-version endpoints on *app*."""
    versions = simport.load(CONF.dispatcher.versions)()
    app.add_route("/version", versions)
    app.add_route("/version/{version_id}", versions)