def __init__(self, options):
    """Set up the Prometheus bridge and register it for collection callbacks.

    Args:
      options: [dict] Configuration options. Reads
        'prometheus_add_source_metalabels' (default True).
    """
    self.__spectator = spectator_client.SpectatorClient(options)
    # Resolve which Spinnaker microservice endpoints we will poll.
    self.__service_endpoints = spectator_client.determine_service_endpoints(
        options)
    # When True, exported metrics carry extra source identification labels.
    self.__add_metalabels = options.get(
        'prometheus_add_source_metalabels', True)
    # Registering here makes the prometheus REGISTRY call back into us.
    REGISTRY.register(self)
def __init__(self, options):
    """Set up the Prometheus metrics service.

    Args:
      options: [dict] Configuration options. Reads the 'prometheus'
        sub-dict ('use_standard_notation', 'add_source_metalabels',
        'push_gateway') plus 'prometheus_add_source_metalabels'.

    Raises:
      ImportError: if the prometheus client library is not installed.
    """
    if not PROMETHEUS_AVAILABLE:
        raise ImportError(
            'You must "pip install prometheus-client" to get'
            ' the prometheus client library.')

    self.__catalog = spectator_client.get_source_catalog(options)
    self.__spectator = spectator_client.SpectatorClient(options)
    self.__spectator_helper = spectator_client.SpectatorClientHelper(options)

    prometheus_options = options.get('prometheus', {})
    # '_' is preferred but ':' was used in earlier releases.
    # NOTE: this deliberately requires an explicit boolean True.
    self.__hierarchy_separator = (
        '_' if prometheus_options.get('use_standard_notation') == True
        else ':'
    )

    # The flat option takes precedence over the 'prometheus' sub-dict.
    add_metalabels = options.get(
        'prometheus_add_source_metalabels',
        prometheus_options.get('add_source_metalabels', True))
    # BUG FIX: the false branch previously produced {} which is an empty
    # *dict*, not an empty set, making __metalabels type-inconsistent.
    self.__metalabels = {'job', 'instance'} if add_metalabels else set()

    self.__push_gateway = prometheus_options.get('push_gateway')
    if self.__push_gateway:
        # Push mode: replace the pull-style publisher with the gateway push.
        self.publish_metrics = self.__publish_to_gateway

    self.__last_collect_time = 0
    self.__last_collect_metric_map = {}

    REGISTRY.register(self)  # Register this so it will call our collect()
def __call__(self, options, metric_service_list):
    """This is the actual method that implements the CommandHandler.

    It is put here in a callable so that we can run this in a separate
    thread. The main thread will be the standard WebServer.

    Args:
      options: [dict] Configuration options; reads 'period' (seconds).
      metric_service_list: [list] Candidate services; only those providing
        a publish_metrics method are polled.
    """
    period = options['period']
    service_endpoints = spectator_client.determine_service_endpoints(
        options)
    spectator = spectator_client.SpectatorClient(options)

    # Only keep services that actually implement publish_metrics.
    publishing_services = [service
                           for service in metric_service_list
                           if hasattr(service, 'publish_metrics')]

    logging.info('Starting Monitor')
    time_offset = int(time.time())
    while True:
        if not publishing_services:
            # we still need this loop to keep the server running
            # but the loop doesnt do anything.
            time.sleep(period)
            continue

        start = time.time()
        done = start
        service_metric_map = spectator.scan_by_service(service_endpoints)
        collected = time.time()

        for service in publishing_services:
            try:
                start_publish = time.time()
                count = service.publish_metrics(service_metric_map)
                if count is None:
                    count = 0
                done = time.time()
                logging.info('Wrote %d metrics to %s in %d ms + %d ms',
                             count, service.__class__.__name__,
                             (collected - start) * 1000,
                             (done - start_publish) * 1000)
            except Exception:
                # BUG FIX: narrowed from a bare 'except:' so that
                # KeyboardInterrupt/SystemExit can still terminate the
                # thread. Publisher failures are logged and ignored so
                # the server keeps running.
                logging.error(traceback.format_exc())

        # Try to align time increments so we always collect around the same
        # time so that the measurements we report are in even intervals.
        # There is still going to be jitter on the collection end but we'll
        # at least always start with a steady rhythm.
        now = time.time()
        delta_time = (period - (int(now) - time_offset)) % period
        if delta_time == 0 and (int(now) == time_offset
                                or (now - start <= 1)):
            delta_time = period
        time.sleep(delta_time)
def __get_type_and_tag_map_and_active_services(self, catalog, options):
    """Scan the catalog and derive type/tag maps plus the active services.

    Builds three SpectatorClient variants with progressively fewer
    processing steps enabled (presumably: transformed, unfiltered, raw --
    inferred from the option names; confirm against SpectatorClient docs).

    Returns:
      (type_map, service_tag_map, active_services) tuple.
    """
    self.__transforming_spectator = spectator_client.SpectatorClient(options)

    # Same options but with metric filtering disabled.
    filtering_options = dict(options)
    filtering_options['disable_metric_filter'] = True
    self.__filtering_spectator = spectator_client.SpectatorClient(
        filtering_options)

    # Additionally disable transforms to get the raw metrics.
    scan_options = dict(filtering_options)
    scan_options['disable_metric_transform'] = True
    raw_spectator = spectator_client.SpectatorClient(scan_options)
    self.__raw_spectator = raw_spectator

    self.__service_map = raw_spectator.scan_by_service(
        catalog, params=scan_options)
    type_map = raw_spectator.service_map_to_type_map(self.__service_map)
    service_tag_map, active_services = self.to_service_tag_map(type_map)
    return type_map, service_tag_map, active_services
def __init__(self, options):
    """Set up the Prometheus bridge from the config directory.

    Args:
      options: [dict] Configuration options; 'config_dir' locates both the
        source catalog and a 'prometheus.conf' overlay that is merged in.
    """
    config_dir = options['config_dir']
    self.__catalog = spectator_client.get_source_catalog(config_dir)
    # Overlay prometheus.conf settings onto the given options.
    options = util.merge_options_and_yaml_from_path(
        options, os.path.join(config_dir, 'prometheus.conf'))
    self.__spectator = spectator_client.SpectatorClient(options)
    # When True, exported metrics carry extra source identification labels.
    self.__add_metalabels = options.get(
        'prometheus_add_source_metalabels', True)
    # Registering here makes the prometheus REGISTRY call back into us.
    REGISTRY.register(self)
def setUp(self):
    """Create a SpectatorClient plus canned responses and a mock request."""
    self.options = {
        'prototype_path': None,
        'host': 'spectator_hostname',
        'services': ['clouddriver', 'gate'],
    }
    self.spectator = spectator_client.SpectatorClient(self.options)

    # Canned payloads standing in for live microservice responses.
    self.mock_clouddriver_response = StringIO(
        sample_data.CLOUDDRIVER_RESPONSE_TEXT)
    self.mock_gate_response = StringIO(sample_data.GATE_RESPONSE_TEXT)

    self.mock_request = Mock()
    self.mock_request.respond = Mock()
def _get_data_map(self, catalog, options):
    """Collect metrics from the catalog, optionally restricted to services.

    Args:
      catalog: [dict] Service name to service config.
      options: [dict] 'services' is an optional comma-separated allowlist;
        'by' chooses the grouping ('service' by default, else by type).

    Returns:
      The scanned metric map grouped per the 'by' option.
    """
    restrict_services = options.get('services', None)
    if restrict_services:
        wanted = restrict_services.split(',')
        catalog = {service: config
                   for service, config in catalog.items()
                   if service in wanted}

    spectator = spectator_client.SpectatorClient(options)
    if options.get('by', 'service') == 'service':
        return spectator.scan_by_service(catalog, params=options)
    return spectator.scan_by_type(catalog, params=options)
def setUp(self):
    """Build options from CLI-style args and canned service responses."""
    self.options = args_to_options([
        '--host=spectator_hostname',
        '--service_hosts=',
        '--clouddriver=spectator_hostname',
        '--gate=spectator_hostname',
    ])
    command_processor.set_global_options(self.options)
    self.spectator = spectator_client.SpectatorClient(self.options)

    # Canned payloads standing in for live microservice responses.
    self.mock_clouddriver_response = StringIO(
        sample_data.CLOUDDRIVER_RESPONSE_TEXT)
    self.mock_gate_response = StringIO(sample_data.GATE_RESPONSE_TEXT)

    self.mock_request = Mock()
    self.mock_request.respond = Mock()
def setUp(self):
    """Build mock options, a SpectatorClient, and canned responses.

    Cleanup: removed the unused locals 'expect_clouddriver' and
    'expect_gate' (assigned from sample_data but never referenced).
    """
    self.mock_options = Mock()
    self.mock_options.prototype_path = None
    self.mock_options.host = 'spectator_hostname'
    self.mock_options.services = ['clouddriver', 'gate']
    self.spectator = spectator_client.SpectatorClient(self.mock_options)

    # Canned payloads standing in for live microservice responses.
    self.mock_clouddriver_response = StringIO(
        sample_data.SAMPLE_CLOUDDRIVER_RESPONSE_TEXT)
    self.mock_gate_response = StringIO(
        sample_data.SAMPLE_GATE_RESPONSE_TEXT)

    self.mock_request = Mock()
    self.mock_request.respond = Mock()
def __init__(self, options):
    """Set up the Prometheus metrics service.

    Args:
      options: [dict] Configuration options. Reads the 'prometheus'
        sub-dict ('add_source_metalabels', 'push_gateway') plus
        'prometheus_add_source_metalabels'.

    Raises:
      ImportError: if the prometheus client library is not installed.
    """
    if not prometheus_available:
        raise ImportError(
            'You must "pip install prometheus-client" to get the prometheus client library.'
        )

    self.__catalog = spectator_client.get_source_catalog(options)
    self.__spectator = spectator_client.SpectatorClient(options)

    prometheus_options = options.get('prometheus', {})
    # The flat option takes precedence over the 'prometheus' sub-dict.
    self.__add_metalabels = options.get(
        'prometheus_add_source_metalabels',
        prometheus_options.get('add_source_metalabels', True))

    self.__push_gateway = prometheus_options.get('push_gateway')
    if self.__push_gateway:
        # Push mode: replace the pull-style publisher with the gateway push.
        self.publish_metrics = self.__publish_to_gateway

    self.__last_collect_time = 0
    self.__last_collect_metric_map = {}

    REGISTRY.register(self)  # Register this so it will call our collect()
def __call__(self, options, metric_service):
    """This is the actual method that implements the CommandHandler.

    It is put here in a callable so that we can run this in a separate
    thread. The main thread will be the standard WebServer.

    Args:
      options: [dict] Configuration options; reads 'period' (seconds).
      metric_service: The service whose publish_metrics is called each cycle.
    """
    period = options['period']
    service_endpoints = spectator_client.determine_service_endpoints(
        options)
    spectator = spectator_client.SpectatorClient(options)

    logging.info('Starting Monitor')
    time_offset = int(time.time())
    while True:
        start = time.time()
        done = start
        service_metric_map = spectator.scan_by_service(service_endpoints)
        collected = time.time()
        try:
            count = metric_service.publish_metrics(service_metric_map)
            if count is None:
                count = 0
            done = time.time()
            logging.info('Wrote %d metrics in %d ms + %d ms',
                         count,
                         (collected - start) * 1000,
                         (done - collected) * 1000)
        except Exception as ex:
            # BUG FIX: traceback.print_exc(ex) passed the exception where
            # a traceback *limit* integer is expected; logging.exception
            # records both the message and the stack trace. Also narrowed
            # from BaseException so KeyboardInterrupt/SystemExit can still
            # terminate the thread.
            logging.exception(ex)

        # Try to align time increments so we always collect around the same
        # time so that the measurements we report are in even intervals.
        # There is still going to be jitter on the collection end but we'll
        # at least always start with a steady rhythm.
        delta_time = (period - (int(done) - time_offset)) % period
        if delta_time == 0 and (int(done) == time_offset
                                or (done - start <= 1)):
            delta_time = period
        time.sleep(delta_time)
def main():
    """Entry point: wire up handlers, then run as command, monitor, or server.

    Builds the command registry (Stackdriver admin plus generic metric
    handlers), then either executes a one-shot 'command', starts the
    background Monitor thread, and/or serves the admin HTTP endpoints.
    """
    init_logging('metric_collector.log')
    options = vars(get_options())
    spectator = spectator_client.SpectatorClient(options)

    # Stackdriver is optional; run degraded if the client cannot be created.
    try:
        stackdriver = StackdriverMetricsService.make_service(options)
    except IOError as ioerror:
        logging.error('Could not create stackdriver client'
                      ' -- Stackdriver will be unavailable\n%s',
                      ioerror)
        stackdriver = None

    # registry is referenced by BaseHandler before being fully populated,
    # so it is created first and extended in place.
    registry = []
    registry.extend([
        CommandDefinition(handlers.BaseHandler(options, registry),
                          '/', 'Home', CommandRequest(options=options),
                          'Home page for Spinnaker metric administration.'),
        CommandDefinition(
            stackdriver_handlers.ClearCustomDescriptorsHandler(
                options, stackdriver),
            '/stackdriver/clear_descriptors', 'clear',
            CommandRequest(options=options),
            'Clear all the Stackdriver Custom Metrics'),
        CommandDefinition(
            stackdriver_handlers.ListCustomDescriptorsHandler(
                options, stackdriver),
            '/stackdriver/list_descriptors', 'list',
            CommandRequest(content_type='application/json', options=options),
            'Get the JSON of all the Stackdriver Custom Metric Descriptors.'),
        CommandDefinition(
            stackdriver_handlers.UpsertCustomDescriptorsHandler(
                options, stackdriver),
            None, 'upsert_descriptors',
            CommandRequest(options=options),
            'Given a file of Stackdriver Custom Metric Desciptors,'
            ' update the existing ones and add the new ones.'
            ' WARNING: Historic time-series data may be lost on update.'),
        CommandDefinition(
            handlers.DumpMetricsHandler(options, spectator),
            '/dump', 'dump',
            CommandRequest(options=options),
            'Show current raw metric JSON from all the servers.'),
        CommandDefinition(
            handlers.ExploreCustomDescriptorsHandler(options, spectator),
            '/explore', 'explore',
            CommandRequest(options=options),
            'Explore metric type usage across Spinnaker microservices.',
        ),
        CommandDefinition(
            handlers.ShowCurrentMetricsHandler(options, spectator),
            '/show', 'show',
            CommandRequest(options=options),
            'Show current metric JSON for all Spinnaker.'),
    ])

    # One-shot command mode: run it and exit without serving HTTP.
    if options.get('command', None):
        command_processor.process_command(options['command'], registry)
        return

    # Monitor mode: publish to Stackdriver on a background thread.
    if options.get('monitor', None):
        logging.info('Starting Monitor every %d s', options['period'])
        metric_service = stackdriver
        monitor = Monitor(spectator, metric_service, options)
        threading.Thread(target=monitor, name='monitor').start()

    logging.info('Starting HTTP server on port %d', options['port'])
    url_path_to_handler = {entry.url_path: entry.handler
                           for entry in registry}
    httpd = HttpServer(options['port'], url_path_to_handler)
    httpd.serve_forever()
    # serve_forever should not return; exiting here signals failure.
    sys.exit(-1)
def main():
    """Entry point: wire up handlers, then run as command, monitor, or server.

    Earlier variant using attribute-style options (argparse Namespace) and a
    DummyMetricService placeholder for publishing.
    """
    init_logging('metric_collector.log')
    options = get_options()
    spectator = spectator_client.SpectatorClient(options)

    # Stackdriver is optional; run degraded if the client cannot be created.
    try:
        stackdriver = stackdriver_client.StackdriverClient.make_client(
            options)
    except IOError as ioerror:
        logging.error('Could not create stackdriver client'
                      ' -- Stackdriver will be unavailable\n%s',
                      ioerror)
        stackdriver = None

    # registry is referenced by BaseHandler before being fully populated,
    # so it is created first and extended in place.
    registry = []
    registry.extend([
        CommandDefinition(handlers.BaseHandler(options, registry),
                          '/', 'Home', CommandRequest(options=options),
                          'Home page for Spinnaker metric administration.'),
        CommandDefinition(
            stackdriver_handlers.ClearCustomDescriptorsHandler(
                options, stackdriver),
            '/stackdriver/clear_descriptors', 'clear',
            CommandRequest(options=options),
            'Clear all the Stackdriver Custom Metrics'),
        CommandDefinition(
            stackdriver_handlers.ListCustomDescriptorsHandler(
                options, stackdriver),
            '/stackdriver/list_descriptors', 'list',
            CommandRequest(content_type='application/json', options=options),
            'Get the JSON of all the Stackdriver Custom Metric Descriptors.'),
        CommandDefinition(
            handlers.DumpMetricsHandler(options, spectator),
            '/dump', 'dump',
            CommandRequest(options=options),
            'Show current raw metric JSON from all the servers.'),
        CommandDefinition(
            handlers.ExploreCustomDescriptorsHandler(options, spectator),
            '/explore', 'explore',
            CommandRequest(options=options),
            'Explore metric type usage across Spinnaker microservices.',
        ),
        CommandDefinition(
            handlers.ShowCurrentMetricsHandler(options, spectator),
            '/show', 'show',
            CommandRequest(options=options),
            'Show current metric JSON for all Spinnaker.'),
    ])

    # One-shot command mode: run it and exit without serving HTTP.
    if options.command:
        command_processor.process_command(options.command, registry)
        return

    # Monitor mode: publish metrics on a background thread.
    if options.monitor:
        logging.info('Starting Monitor every %d s', options.period)
        # TODO: Replace this with a real service.
        metric_service = DummyMetricService()
        monitor = Monitor(spectator, metric_service, options)
        threading.Thread(target=monitor, name='monitor').start()

    logging.info('Starting HTTP server on port %d', options.port)
    url_path_to_handler = {entry.url_path: entry.handler
                           for entry in registry}
    httpd = HttpServer(options.port, url_path_to_handler)
    httpd.serve_forever()
    # serve_forever should not return; exiting here signals failure.
    sys.exit(-1)
def setUp(self):
    """Create a SpectatorClient and the default query params for tests."""
    test_options = {'prototype_path': None, 'host': TEST_HOST}
    self.spectator = spectator_client.SpectatorClient(test_options)
    self.default_query_params = '?tagNameRegex=.%2B'  # tagNameRegex=.+
def make_spectator_client(self, options):
    """Construct a SpectatorClient from the given options.

    Kept as a separate method, presumably so subclasses/tests can
    substitute a different client (confirm with callers).
    """
    client = spectator_client.SpectatorClient(options)
    return client
def setUp(self):
    """Create a SpectatorClient from mock attribute-style options."""
    mock_options = Mock()
    mock_options.prototype_path = None
    mock_options.host = TEST_HOST
    self.spectator = spectator_client.SpectatorClient(mock_options)
def setUp(self):
    """Create a SpectatorClient with the minimal test options."""
    test_options = {'prototype_path': None, 'host': TEST_HOST}
    self.spectator = spectator_client.SpectatorClient(test_options)