def run_main(main_function, configuration_requirements=None):
    """Configure a RuleSystem app, run *main_function*, then serve forever.

    Builds the configman configuration (rule_system_class plus logging
    options plus any caller-supplied requirements), initializes the rule
    system on the asyncio event loop, hands control to *main_function*,
    starts the rule system, and blocks in the event loop until
    interrupted with Ctrl-C.

    :param main_function: callable invoked as main_function(config, rule_system)
        after the rule system has been initialized.
    :param configuration_requirements: optional configman Namespace of extra
        option definitions; defaults to an empty Namespace.
    """
    # A fresh Namespace per call avoids the shared-mutable-default pitfall
    # of the original `configuration_requirements=Namespace()` signature.
    if configuration_requirements is None:
        configuration_requirements = Namespace()
    required_config = Namespace()
    required_config.add_option(
        'rule_system_class',
        doc='the fully qualified name of the RuleSystem class',
        default=RuleSystem,
        from_string_converter=class_converter,
    )
    required_config.update(logging_config)
    required_config.update(configuration_requirements)
    config = configuration(required_config)
    logging.basicConfig(level=config.logging_level, format=config.logging_format)
    log_config(config)
    rule_system = config.rule_system_class(config)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(rule_system.initialize())
    main_function(config, rule_system)
    loop.run_until_complete(rule_system.go())
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the server; exit quietly.
        pass
def config_from_configman():
    """Generate a configman DotDict to pass to configman components."""
    defs = Namespace()
    defs.namespace('logging')
    defs.logging = App.required_config.logging
    defs.namespace('metricscfg')
    defs.metricscfg = App.required_config.metricscfg
    defs.namespace('elasticsearch')
    defs.elasticsearch.add_option(
        'elasticsearch_class',
        default=ESConnectionContext,
    )
    defs.namespace('queue')
    # NOTE(review): this option lands at the top level, not inside the
    # "queue" namespace declared on the previous line -- confirm intended.
    defs.add_option('crashqueue_class', default=PubSubCrashQueue)
    defs.namespace('crashdata')
    defs.crashdata.add_option(
        'crash_data_class',
        default=SimplifiedCrashData,
    )
    defs.namespace('telemetrydata')
    defs.telemetrydata.add_option(
        'telemetry_data_class',
        default=TelemetryCrashData,
    )
    return configuration(
        definition_source=defs,
        values_source_list=[settings.SOCORRO_IMPLEMENTATIONS_CONFIG],
    )
def run(*crash_ids):
    """Submit each given crash ID for reprocessing and print the result.

    Sets up a SingleCrashMQCrashStorage against a local RabbitMQ and
    submits every crash_id in turn.

    :param crash_ids: crash IDs to submit for reprocessing.
    :returns: 0, suitable for use as a process exit code.
    """
    definition_source = Namespace()
    definition_source.namespace('queuing')
    definition_source.queuing.add_option(
        'rabbitmq_reprocessing_class',
        default=SingleCrashMQCrashStorage,
    )
    # Hard-wired local RabbitMQ connection details; the starred-out
    # credentials are placeholders, presumably overridden elsewhere --
    # TODO confirm against the deployment configuration.
    config_dict = {
        'resource': {
            'rabbitmq': {
                'host': 'localhost',
                'port': '5672',
                'virtual_host': '/'
            }
        },
        'secrets': {
            'rabbitmq': {
                'rabbitmq_password': '******',
                'rabbitmq_user': '******'
            }
        }
    }
    config = configuration(
        definition_source=definition_source,
        values_source_list=[config_dict],
    )
    config.queuing.logger = logger
    config.logger = logger
    storage = SingleCrashMQCrashStorage(config=config['queuing'])
    for crash_id in crash_ids:
        # Parenthesized print behaves identically under Python 2 and is
        # valid Python 3; the original bare print statement was Py2-only.
        print(storage.submit(crash_id))
    return 0
def config_from_configman():
    """Build and return a configman configuration for these components."""
    namespace = Namespace()
    namespace.namespace('logging')
    namespace.logging = socorro_app.App.required_config.logging
    namespace.namespace('metricscfg')
    namespace.metricscfg = socorro_app.App.required_config.metricscfg
    namespace.namespace('elasticsearch')
    namespace.elasticsearch.add_option('elasticsearch_class', default=ESConnectionContext)
    namespace.namespace('queue')
    # NOTE(review): added at the top level rather than inside the "queue"
    # namespace declared above -- confirm that is intended.
    namespace.add_option('crashqueue_class', default=PubSubCrashQueue)
    namespace.namespace('crashdata')
    namespace.crashdata.add_option(
        'crash_data_class',
        default=socorro.external.boto.crash_data.SimplifiedCrashData,
    )
    namespace.namespace('telemetrydata')
    namespace.telemetrydata.add_option(
        'telemetry_data_class',
        default=socorro.external.boto.crash_data.TelemetryCrashData,
    )
    return configuration(
        definition_source=namespace,
        values_source_list=[settings.SOCORRO_IMPLEMENTATIONS_CONFIG],
    )
def config_from_configman():
    """Generate a configman DotDict to pass to configman components."""
    ds = Namespace()
    ds.namespace("logging")
    ds.logging = App.required_config.logging
    ds.namespace("metricscfg")
    ds.metricscfg = App.required_config.metricscfg
    ds.namespace("elasticsearch")
    ds.elasticsearch.add_option(
        "elasticsearch_class",
        default=ESConnectionContext,
    )
    ds.namespace("queue")
    # NOTE(review): this option lands at the top level, not inside the
    # "queue" namespace declared above -- confirm that is intended.
    ds.add_option(
        "crashqueue_class",
        default=import_string(settings.CRASHQUEUE),
    )
    ds.namespace("crashdata")
    ds.crashdata.add_option(
        "crash_data_class",
        default=SimplifiedCrashData,
    )
    ds.namespace("telemetrydata")
    ds.telemetrydata.add_option(
        "telemetry_data_class",
        default=TelemetryCrashData,
    )
    return configuration(
        definition_source=ds,
        values_source_list=[settings.SOCORRO_CONFIG],
    )
def config_from_configman():
    """Assemble the configman configuration and attach the webapp logger."""
    src = Namespace()
    src.namespace("logging")
    src.logging = socorro_app.App.required_config.logging
    src.namespace("elasticsearch")
    src.elasticsearch.add_option(
        "elasticsearch_class",
        default=ElasticsearchConfig,
    )
    src.namespace("database")
    src.database.add_option(
        "database_storage_class",
        default=PostgreSQLCrashStorage,
    )
    src.namespace("queuing")
    src.queuing.add_option(
        "rabbitmq_reprocessing_class",
        default=ReprocessingOneRabbitMQCrashStore,
    )
    src.namespace("priority")
    src.priority.add_option(
        "rabbitmq_priority_class",
        default=PriorityjobRabbitMQCrashStore,
    )
    src.namespace("data")
    src.data.add_option(
        "crash_data_class",
        default=socorro.external.boto.crash_data.SimplifiedCrashData,
    )
    config = configuration(
        definition_source=src,
        values_source_list=[settings.SOCORRO_IMPLEMENTATIONS_CONFIG],
    )
    # The ReprocessingOneRabbitMQCrashStore crash storage needs a "logger"
    # in its config object.  Rather than using the aggregate logger that
    # configman would set up, reuse the same logger the webapp already has.
    config.queuing.logger = logger
    config.priority.logger = logger
    config.data.logger = logger
    return config
def config_from_configman():
    """Return a configman configuration built from the ES required_config."""
    sources = [
        ElasticsearchConfig.required_config,
        # This required_config defines the logger aggregate.
        socorro_app.App.required_config,
    ]
    return configuration(
        definition_source=sources,
        values_source_list=[settings.SOCORRO_IMPLEMENTATIONS_CONFIG],
    )
def config_from_configman():
    """Return a configman configuration for the ES and PostgreSQL storages."""
    values = [settings.SOCORRO_IMPLEMENTATIONS_CONFIG]
    definitions = [
        ElasticsearchConfig.required_config,
        PostgreSQLCrashStorage.required_config,
        # This required_config defines the logger aggregate.
        socorro_app.App.required_config,
    ]
    return configuration(definition_source=definitions, values_source_list=values)
def config_from_configman():
    """Assemble the webapp's configman configuration and wire in loggers."""
    ns = Namespace()
    ns.namespace('logging')
    ns.logging = socorro_app.App.required_config.logging
    ns.namespace('metricscfg')
    ns.metricscfg = socorro_app.App.required_config.metricscfg
    ns.namespace('elasticsearch')
    ns.elasticsearch.add_option('elasticsearch_class', default=ElasticsearchConfig)
    ns.namespace('database')
    ns.database.add_option('database_storage_class', default=PostgreSQLStorage)
    ns.namespace('queuing')
    ns.queuing.add_option(
        'rabbitmq_reprocessing_class',
        default=ReprocessingOneRabbitMQCrashStore,
    )
    ns.namespace('priority')
    ns.priority.add_option(
        'rabbitmq_priority_class',
        default=PriorityjobRabbitMQCrashStore,
    )
    ns.namespace('crashdata')
    ns.crashdata.add_option(
        'crash_data_class',
        default=socorro.external.boto.crash_data.SimplifiedCrashData,
    )
    ns.namespace('telemetrydata')
    ns.telemetrydata.add_option(
        'telemetry_data_class',
        default=socorro.external.boto.crash_data.TelemetryCrashData,
    )
    config = configuration(
        definition_source=ns,
        values_source_list=[settings.SOCORRO_IMPLEMENTATIONS_CONFIG],
    )
    # The ReprocessingOneRabbitMQCrashStore crash storage needs a "logger"
    # in its config object.  Rather than using the aggregate logger that
    # configman would set up, reuse the same logger the webapp already has.
    config.queuing.logger = logger
    config.priority.logger = logger
    config.crashdata.logger = logger
    config.telemetrydata.logger = logger
    return config
def config_from_configman():
    """Wrap def_source (plus MiddlewareApp webapi options) in a configuration."""
    sources = [
        def_source,
        # Tie-over until the MiddlewareApp is fully retired: its webapi
        # namespace still defines options required by the SuperSearch
        # implementation.
        MiddlewareApp.required_config.webapi,
    ]
    return configuration(
        definition_source=sources,
        values_source_list=[settings.SOCORRO_IMPLEMENTATIONS_CONFIG],
    )
def config_from_configman():
    """Generate a configman DotDict to pass to configman components."""
    source = Namespace()
    source.namespace('logging')
    source.logging = App.required_config.logging
    source.namespace('metricscfg')
    source.metricscfg = App.required_config.metricscfg
    source.namespace('elasticsearch')
    source.elasticsearch.add_option('elasticsearch_class', default=ESConnectionContext)
    source.namespace('queue')
    # NOTE(review): this option lands at the top level, not inside the
    # "queue" namespace declared above -- confirm that is intended.
    source.add_option('crashqueue_class', default=PubSubCrashQueue)
    source.namespace('crashdata')
    source.crashdata.add_option('crash_data_class', default=SimplifiedCrashData)
    source.namespace('telemetrydata')
    source.telemetrydata.add_option('telemetry_data_class', default=TelemetryCrashData)
    return configuration(
        definition_source=source,
        values_source_list=[settings.SOCORRO_IMPLEMENTATIONS_CONFIG],
    )
)
from socorro.app.socorro_app import App
from socorro.dataservice.util import (
    classes_in_namespaces_converter,
)

# The dataservice classes exposed through the API.
SERVICES_LIST = ('socorro.external.postgresql.bugs_service.Bugs',)

# Allow configman to dynamically load the configuration and classes
# for our API dataservice objects
def_source = Namespace()
def_source.namespace('services')
def_source.services.add_option(
    'service_list',
    # The converter turns the comma-joined class list back into the
    # nested-namespace structure configman expects.
    default=','.join(SERVICES_LIST),
    from_string_converter=classes_in_namespaces_converter()
)
settings.DATASERVICE_CONFIG = configuration(
    definition_source=[
        def_source,
        App.get_required_config(),
    ],
    values_source_list=[
        settings.DATASERVICE_CONFIG_BASE,
        # ConfigFileFutureProxy,
        environment
    ]
)
( a_box_size * box_x + box_relative_x, a_box_size * box_y + box_relative_y ) ) ): found_pixel = True box_count += 1 break if found_pixel: break gx.append(math.log(1.0 / a_box_size)) print box_count gy.append(math.log(box_count)) print gx, gy slope, intercept, r_value, p_value, std_err = linregress(gx, gy) return slope return b if __name__ == "__main__": config = configuration(required_config) pixels = Image.open(config.image_file) print box_count(pixels, config.box_sizes, config.interesting_pixel_function)
# Option definitions for the scene-controlling Web of Things server.
required_config.server = Namespace()
required_config.server.add_option(
    name='wot_server_class',
    default="pywot.WoTServer",
    doc="the fully qualified name of the WoT Server class",
    from_string_converter=class_converter)
required_config.add_option(
    name="scene_thing_class",
    default=SceneThing,
    # typo fixed in this user-facing help text: "implents" -> "implements"
    doc="the fully qualified name of the class that implements scene control",
    from_string_converter=class_converter)
required_config.add_option(
    'things_gateway_auth_key',
    doc='the api key to access the Things Gateway',
    short_form="G",
    default='THINGS GATEWAY AUTH KEY',
)
required_config.update(logging_config)

config = configuration(required_config)
logging.basicConfig(level=config.logging_level, format=config.logging_format)
log_config(config)

# Instantiate the scene thing and serve it on the configured port; run()
# blocks until the server shuts down.
scene_thing = config.scene_thing_class(config)
server = config.server.wot_server_class(config, [scene_thing], port=config.server.service_port)
server.run()
logging.debug('done.')
tide_light_name_list.append(namespace_name_for_index) required_config[namespace_name_for_index] = Namespace() required_config[namespace_name_for_index].update(tide_light_config) def __repr__(self): return number_of_tide_lights return TideLightCollection() # the definition of the number_of_tide_lights config parameter says that the function # tide_config_setup should be used to convert any text input (from command line or # config files) into the collection of nested namespaces representing each individual # tide light. base_required_config.add_option('number_of_tide_lights', doc='how many tide lights to set up', default="1", short_form="n", from_string_converter=tide_config_setup) base_required_config.update(logging_config) if __name__ == '__main__': config = configuration([base_required_config]) logging.basicConfig(level=config.logging_level, format=config.logging_format) log_config(config) event_loop = asyncio.get_event_loop() event_loop.run_until_complete(run_all_tide_lights(config))