def test_refresh_invalid_response(monkeypatch, tmpdir):
    # A token refresh whose JSON payload lacks "expires_in"/"access_token"
    # must raise InvalidTokenResponse with a precise error message.
    tokens.configure(dir=str(tmpdir), url='https://example.org')
    tokens.manage('mytok', ['myscope'])
    tokens.start()  # this does not do anything..

    # Fake requests.post so no real HTTP happens; first response is an
    # object with the wrong keys.
    response = MagicMock()
    response.json.return_value = {'foo': 'bar'}
    post = MagicMock()
    post.return_value = response
    monkeypatch.setattr('requests.post', post)
    # Bypass reading credential files from disk.
    monkeypatch.setattr('tokens.read_credentials', lambda path: (VALID_USER_JSON, VALID_CLIENT_JSON))

    with pytest.raises(tokens.InvalidTokenResponse) as exc_info:
        tokens.get('mytok')
    assert str(exc_info.value) == """Invalid token response: Expected a JSON object with keys "expires_in" and "access_token": 'expires_in'"""

    # verify that we use a proper HTTP timeout..
    post.assert_called_with('https://example.org',
                            data={'username': '******', 'scope': 'myscope', 'password': '******', 'grant_type': 'password'},
                            headers={'User-Agent': 'python-tokens/{}'.format(tokens.__version__)},
                            timeout=(1.25, 2.25),
                            auth=('cid', 'sec'))

    # Second case: an empty "access_token" value must also be rejected.
    response.json.return_value = {'access_token': '', 'expires_in': 100}
    with pytest.raises(tokens.InvalidTokenResponse) as exc_info:
        tokens.get('mytok')
    assert str(exc_info.value) == 'Invalid token response: Empty "access_token" value'
def get_token(url: str, scopes: str, credentials_dir: str) -> dict:
    """Configure the tokens library and fetch access token info for 'lizzy'.

    :param url: OAuth2 token endpoint URL
    :param scopes: single scope string to request for the 'lizzy' token
    :param credentials_dir: directory holding the application credential files
    :return: token info as produced by ``tokens.get('lizzy')``
    """
    tokens.configure(url=url, dir=credentials_dir)
    tokens.manage('lizzy', [scopes])
    tokens.start()
    token_info = tokens.get('lizzy')
    return token_info
def __init__(self, kafka_broker, metrics, callback_store, worker_thread_count=1, max_read_messages_per_cycle=1, use_oauth2_for_push=False):
    """Daemon thread that consumes from Kafka and dispatches stored callbacks.

    :param kafka_broker: Kafka broker/client object, kept on ``self.kafka``
    :param metrics: metrics sink, kept on ``self.metrics``
    :param callback_store: store of callbacks to deliver
    :param worker_thread_count: number of worker threads (default 1)
    :param max_read_messages_per_cycle: cap on messages read per cycle (default 1)
    :param use_oauth2_for_push: when True, an OAuth2 token named
        'event-store' with scope 'uid' is managed for pushing events
    """
    # daemon=True so this thread never blocks interpreter shutdown
    threading.Thread.__init__(self, daemon=True)
    self.use_oauth2_for_push = use_oauth2_for_push
    self.callback_store = callback_store
    self.worker_thread_count = worker_thread_count
    self.max_read_messages_per_cycle = max_read_messages_per_cycle
    self.kafka = kafka_broker
    self.metrics = metrics
    if use_oauth2_for_push:
        # Only start background token refresh when pushes actually need OAuth2.
        tokens.manage('event-store', ['uid'])
        tokens.start()
def __init__(self):
    """Pick an OAuth2 credential strategy based on the runtime environment."""
    if os.getenv('CREDENTIALS_DIR') or os.getenv('OAUTH2_ACCESS_TOKENS'):
        # no human user credentials if running as service
        # CREDENTIALS_DIR means running on Taupage, OAUTH2_ACCESS_TOKENS means Docker locally
        tokens.manage('costreport', ['uid'])
        tokens.start()
        # NOTE(review): self.token is never assigned in this branch -- callers
        # presumably fetch via tokens.get() when on_taupage is True; confirm.
        self.on_taupage = True
    elif os.getenv('TRAVIS'):
        # CI run: placeholder token, never sent anywhere.
        self.token = "i-am-on-travis-and-will-not-be-used"
        self.on_taupage = False
    else:
        # human user
        self.token = zign.api.get_token('costreport', ['uid'])
        self.on_taupage = False
def use_berry_token(app_name):
    """Register *app_name* with the tokens library and install a token provider.

    The installed provider returns the current access token for *app_name*,
    or an empty string if fetching the token fails.
    """
    nakadi_scopes = [
        'nakadi.event_stream.read',
        'nakadi.event_stream.write',
        'nakadi.event_type.write',
        'uid',
    ]
    tokens.configure()
    tokens.manage(app_name, nakadi_scopes)
    tokens.start()

    def _provide_token():
        # Best effort: the provider must never raise.
        try:
            return tokens.get(app_name)
        except Exception as e:
            logging.error('Failed to get token for {}'.format(app_name), exc_info=e)
            return ''

    set_provider(_provide_token)
def test_refresh_invalid_credentials(monkeypatch, tmpdir):
    # Token refresh must raise InvalidCredentialsError when the user
    # credentials file is missing a required key.
    tokens.configure(dir=str(tmpdir), url='https://example.org')
    tokens.manage('mytok', ['myscope'])
    tokens.start()  # this does not do anything..

    with open(os.path.join(str(tmpdir), 'user.json'), 'w') as fd:
        # missing password
        json.dump({'application_username': '******'}, fd)
    with open(os.path.join(str(tmpdir), 'client.json'), 'w') as fd:
        json.dump({'client_id': 'cid', 'client_secret': 'sec'}, fd)

    with pytest.raises(tokens.InvalidCredentialsError) as exc_info:
        tokens.get('mytok')
    assert str(exc_info.value) == "Invalid OAuth credentials: Missing key: 'application_password'"
def configure(self, conf):
    """
    Called after plugin is loaded to pass the [configuration] section in
    their plugin info file.

    :param conf: configuration dictionary
    """
    # will use OAUTH2_ACCESS_TOKEN_URL environment variable by default
    # will try to read application credentials from CREDENTIALS_DIR
    tokens.configure()
    # 'oauth2.tokens' is a colon-separated list of name=scope1,scope2 entries.
    configured = conf.get('oauth2.tokens')
    if configured:
        for entry in configured.split(':'):
            name, scope_csv = entry.split('=', 1)
            tokens.manage(name, scope_csv.split(','))
    tokens.manage('uid', ['uid'])
    tokens.start()
import requests import tokens from zmon_worker_monitor.zmon_worker.errors import ConfigurationError from zmon_worker_monitor.builtins.plugins.distance_to_history import DistanceWrapper from zmon_worker_monitor.adapters.ifunctionfactory_plugin import IFunctionFactoryPlugin, propartial logger = logging.getLogger(__name__) # will use OAUTH2_ACCESS_TOKEN_URL environment variable by default # will try to read application credentials from CREDENTIALS_DIR tokens.configure() tokens.manage('uid', ['uid']) tokens.start() ONE_WEEK = 7 * 24 * 60 * 60 ONE_WEEK_AND_5MIN = ONE_WEEK + 5 * 60 DATAPOINTS_ENDPOINT = 'api/v1/datapoints/query' class HistoryFactory(IFunctionFactoryPlugin): def __init__(self): super(HistoryFactory, self).__init__() # fields from configuration def configure(self, conf): """ Called after plugin is loaded to pass the [configuration] section in their plugin info file
def main():
    """Discover AWS entities in the current account/region and sync them into ZMON.

    Steps: parse CLI args, optionally set up OAuth2 tokens, detect the AWS
    region, collect entities from the AWS APIs, then diff them against the
    ZMON entity service (remove stale entities, add new/updated ones).
    """
    argp = argparse.ArgumentParser(description='ZMON AWS Agent')
    argp.add_argument('-e', '--entity-service', dest='entityservice')
    argp.add_argument('-r', '--region', dest='region', default=None)
    argp.add_argument('-j', '--json', dest='json', action='store_true')
    argp.add_argument('--no-oauth2', dest='disable_oauth2', action='store_true', default=False)
    argp.add_argument('--postgresql-user', dest='postgresql_user', default=os.environ.get('AGENT_POSTGRESQL_USER'))
    argp.add_argument('--postgresql-pass', dest='postgresql_pass', default=os.environ.get('AGENT_POSTGRESQL_PASS'))
    args = argp.parse_args()

    if not args.disable_oauth2:
        tokens.configure()
        tokens.manage('uid', ['uid'])
        tokens.start()

    logging.basicConfig(level=logging.INFO)

    # 1. Determine region
    if not args.region:
        logger.info('Trying to figure out region..')
        try:
            response = requests.get('http://169.254.169.254/latest/meta-data/placement/availability-zone', timeout=2)
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit pass through.
            logger.exception('Region was not specified as a parameter and can not be fetched from instance meta-data!')
            raise
        # Availability zone looks like "eu-west-1a" -> strip the trailing zone letter.
        region = response.text[:-1]
    else:
        region = args.region

    logger.info('Using region: {}'.format(region))
    logger.info('Entity service URL: %s', args.entityservice)
    logger.info('Reading DNS data for hosted zones')
    aws.populate_dns_data()

    aws_account_id = aws.get_account_id(region)
    infrastructure_account = 'aws:{}'.format(aws_account_id) if aws_account_id else None

    if not infrastructure_account:
        logger.error('AWS agent: Cannot determine infrastructure account ID. Terminating!')
        return

    # 2. ZMON entities
    token = None if args.disable_oauth2 else tokens.get('uid')
    zmon_client = Zmon(args.entityservice, token=token, user_agent=get_user_agent())

    query = {'infrastructure_account': infrastructure_account, 'region': region, 'created_by': 'agent'}
    entities = zmon_client.get_entities(query)

    # 3. Get running apps
    apps = aws.get_running_apps(region, entities)

    elbs = []
    scaling_groups = []
    rds = []
    elasticaches = []
    dynamodbs = []
    sqs = []
    new_entities = []
    to_be_removed = []

    if len(apps) > 0:
        elbs = aws.get_running_elbs(region, infrastructure_account)
        scaling_groups = aws.get_auto_scaling_groups(region, infrastructure_account)
        rds = aws.get_rds_instances(region, infrastructure_account, entities)
        elasticaches = aws.get_elasticache_nodes(region, infrastructure_account)
        dynamodbs = aws.get_dynamodb_tables(region, infrastructure_account)
        certificates = aws.get_certificates(region, infrastructure_account)
        aws_limits = aws.get_limits(region, infrastructure_account, apps, elbs)
        sqs = aws.get_sqs_queues(region, infrastructure_account, entities)

    # NOTE(review): certificates and aws_limits are only bound when apps exist;
    # with zero running apps the current_entities expression below raises NameError.
    account_alias = aws.get_account_alias(region)
    ia_entity = {
        'type': 'local',
        'infrastructure_account': infrastructure_account,
        'account_alias': account_alias,
        'region': region,
        'id': 'aws-ac[{}:{}]'.format(infrastructure_account, region),
        'created_by': 'agent',
    }

    application_entities = aws.get_apps_from_entities(apps, infrastructure_account, region)

    if args.postgresql_user and args.postgresql_pass:
        postgresql_clusters = zmon_client.get_entities({
            'infrastructure_account': infrastructure_account,
            'region': region,
            'type': 'postgresql_cluster'
        })
        postgresql_databases = postgresql.get_databases_from_clusters(postgresql_clusters, infrastructure_account,
                                                                      region, args.postgresql_user,
                                                                      args.postgresql_pass)
    else:
        # Pretend the list of DBs is empty, but also make sure we don't remove
        # any pre-existing database entities because we don't know about them.
        postgresql_databases = []
        entities = [e for e in entities if e.get('type') != 'postgresql_database']

    current_entities = (elbs + scaling_groups + apps + application_entities + rds + postgresql_databases +
                        elasticaches + dynamodbs + certificates + sqs)
    current_entities.append(aws_limits)
    current_entities.append(ia_entity)

    # 4. Remove missing entities
    existing_ids = get_existing_ids(entities)
    current_entities_ids = {e['id'] for e in current_entities}

    to_be_removed, delete_error_count = remove_missing_entities(existing_ids, current_entities_ids,
                                                                zmon_client, json=args.json)

    # BUG FIX: this log line previously reported len(new_entities), which is
    # always 0 at this point; report the number of removed entities instead
    # (consistent with the other agent versions).
    logger.info('Found {} removed entities from {} entities ({} failed)'.format(
        len(to_be_removed), len(current_entities), delete_error_count))

    # 5. Get new/updated entities
    new_entities, add_error_count = add_new_entities(current_entities, entities, zmon_client, json=args.json)
    logger.info('Found {} new entities from {} entities ({} failed)'.format(
        len(new_entities), len(current_entities), add_error_count))

    # 6. Always add Local entity
    if not args.json:
        ia_entity['errors'] = {'delete_count': delete_error_count, 'add_count': add_error_count}
        try:
            zmon_client.add_entity(ia_entity)
        except Exception:
            # Narrowed from a bare "except:" -- best-effort update, keep going.
            logger.exception('Failed to add Local entity: {}'.format(ia_entity))

    types = {e['type']: len([t for t in new_entities if t['type'] == e['type']]) for e in new_entities}
    for t, v in types.items():
        logger.info('Found {} new entities of type: {}'.format(v, t))

    # Check if it is a dry run!
    if args.json:
        d = {
            'applications': application_entities,
            'apps': apps,
            'dynamodb': dynamodbs,
            'elbs': elbs,
            'elc': elasticaches,
            'rds': rds,
            'certificates': certificates,
            'aws_limits': aws_limits,
            'sqs_queues': sqs,
            'new_entities': new_entities,
            'to_be_removed': to_be_removed,
        }
        print(json.dumps(d, indent=4))
import json

from prometheus_client.parser import text_string_to_metric_families
from collections import defaultdict
from zmon_worker_monitor.zmon_worker.errors import HttpError
from requests.adapters import HTTPAdapter
from zmon_worker_monitor.adapters.ifunctionfactory_plugin import IFunctionFactoryPlugin, propartial

import tokens

# will use OAUTH2_ACCESS_TOKEN_URL environment variable by default
# will try to read application credentials from CREDENTIALS_DIR
tokens.configure()
tokens.manage('uid', ['uid'])
tokens.start()

# NOTE(review): "logging" is used here but not imported in this chunk --
# presumably imported earlier in the file; confirm.
logger = logging.getLogger('zmon-worker.http-function')


class HttpFactory(IFunctionFactoryPlugin):
    """Worker plugin factory for the HTTP check function."""

    def __init__(self):
        super(HttpFactory, self).__init__()

    def configure(self, conf):
        """
        Called after plugin is loaded to pass the [configuration] section in their plugin info file

        :param conf: configuration dictionary
        """
        # This plugin needs no per-deployment configuration.
        return
def main():
    """Discover AWS entities and sync them into ZMON, instrumented with OpenTracing.

    Steps: parse args, set up OAuth2 tokens and the tracer, detect the region,
    collect entities from AWS, diff against the ZMON entity service (remove
    stale, add new/updated), and refresh the Local entity.
    """
    argp = argparse.ArgumentParser(description='ZMON AWS Agent')
    argp.add_argument('-e', '--entity-service', dest='entityservice')
    argp.add_argument('-r', '--region', dest='region', default=None)
    argp.add_argument('-j', '--json', dest='json', action='store_true')
    argp.add_argument('-t', '--tracer', dest='tracer', default=os.environ.get('OPENTRACING_TRACER', 'noop'))
    argp.add_argument('--no-oauth2', dest='disable_oauth2', action='store_true', default=False)
    argp.add_argument('--postgresql-user', dest='postgresql_user', default=os.environ.get('AGENT_POSTGRESQL_USER'))
    argp.add_argument('--postgresql-pass', dest='postgresql_pass', default=os.environ.get('AGENT_POSTGRESQL_PASS'))
    args = argp.parse_args()

    if not args.disable_oauth2:
        tokens.configure()
        tokens.manage('uid', ['uid'])
        tokens.start()

    init_opentracing_tracer(args.tracer)
    root_span = opentracing.tracer.start_span(operation_name='aws_entity_discovery')

    with root_span:
        logging.basicConfig(level=logging.INFO)

        # 0. Fetch extra data for entities (EXTRA_ENTITY_FIELDS="k1=v1,k2=v2")
        entity_extras = {}
        for ex in os.getenv('EXTRA_ENTITY_FIELDS', '').split(','):
            if '=' not in ex:
                continue
            k, v = ex.split('=', 1)
            if k and v:
                entity_extras[k] = v

        # 1. Determine region
        if not args.region:
            logger.info('Trying to figure out region..')
            try:
                response = requests.get('http://169.254.169.254/latest/meta-data/placement/availability-zone',
                                        timeout=2)
            except Exception:
                root_span.set_tag('error', True)
                root_span.log_kv({'exception': traceback.format_exc()})
                # BUG FIX: the two concatenated fragments were missing a
                # separating space ("...andcan not...").
                logger.exception('Region was not specified as a parameter and '
                                 'can not be fetched from instance meta-data!')
                raise
            # Availability zone looks like "eu-west-1a" -> strip the zone letter.
            region = response.text[:-1]
        else:
            region = args.region

        root_span.set_tag('region', region)

        logger.info('Using region: {}'.format(region))
        logger.info('Entity service URL: %s', args.entityservice)
        logger.info('Reading DNS data for hosted zones')
        aws.populate_dns_data()

        aws_account_id = aws.get_account_id(region)
        infrastructure_account = 'aws:{}'.format(aws_account_id) if aws_account_id else None

        if not infrastructure_account:
            logger.error('AWS agent: Cannot determine infrastructure account ID. Terminating!')
            return

        root_span.set_tag('account', infrastructure_account)

        # 2. ZMON entities
        # BUG FIX: token was only bound inside the conditional below, so running
        # with --no-oauth2 raised NameError when constructing the client.
        token = None
        if not args.disable_oauth2:
            token = os.getenv('ZMON_TOKEN', None) or tokens.get('uid')
        zmon_client = Zmon(args.entityservice, token=token, user_agent=get_user_agent())

        query = {'infrastructure_account': infrastructure_account, 'region': region, 'created_by': 'agent'}
        entities = zmon_client.get_entities(query)

        # 3. Get running apps
        apps = aws.get_running_apps(region, entities)

        elbs = []
        scaling_groups = []
        rds = []
        elasticaches = []
        dynamodbs = []
        sqs = []
        new_entities = []
        to_be_removed = []

        if len(apps) > 0:
            elbs = aws.get_running_elbs(region, infrastructure_account)
            scaling_groups = aws.get_auto_scaling_groups(region, infrastructure_account)
            rds = aws.get_rds_instances(region, infrastructure_account, entities)
            elasticaches = aws.get_elasticache_nodes(region, infrastructure_account)
            dynamodbs = aws.get_dynamodb_tables(region, infrastructure_account)
            certificates = aws.get_certificates(region, infrastructure_account)
            aws_limits = aws.get_limits(region, infrastructure_account, apps, elbs, entities)
            sqs = aws.get_sqs_queues(region, infrastructure_account, entities)
            postgresql_clusters = postgresql.get_postgresql_clusters(region, infrastructure_account,
                                                                     scaling_groups, apps)

        # NOTE(review): certificates, aws_limits and postgresql_clusters are only
        # bound when apps exist; with zero running apps the code below raises NameError.
        account_alias = aws.get_account_alias(region)
        ia_entity = {
            'type': 'local',
            'infrastructure_account': infrastructure_account,
            'account_alias': account_alias,
            'region': region,
            'id': 'aws-ac[{}:{}]'.format(infrastructure_account, region),
            'created_by': 'agent',
        }

        # Derive the owning team tag from the account alias (minus optional prefix).
        account_alias_prefix = os.getenv('ACCOUNT_ALIAS_PREFIX', None)
        owner = account_alias
        if account_alias_prefix:
            owner = owner.replace(account_alias_prefix, '', 1)
        root_span.set_tag('team', owner)

        application_entities = aws.get_apps_from_entities(apps, infrastructure_account, region)

        if args.postgresql_user and args.postgresql_pass:
            postgresql_databases = postgresql.get_databases_from_clusters(postgresql_clusters,
                                                                          infrastructure_account, region,
                                                                          args.postgresql_user,
                                                                          args.postgresql_pass)
        else:
            # Pretend the list of DBs is empty, but also make sure we don't remove
            # any pre-existing database entities because we don't know about them.
            postgresql_databases = []
            entities = [e for e in entities if e.get('type') != 'postgresql_database']

        current_entities = (elbs + scaling_groups + apps + application_entities + rds + postgresql_databases +
                            postgresql_clusters + elasticaches + dynamodbs + certificates + sqs)
        current_entities.append(aws_limits)
        current_entities.append(ia_entity)

        for entity in current_entities:
            entity.update(entity_extras)

        # 4. Remove missing entities
        existing_ids = get_existing_ids(entities)
        current_entities_ids = {e['id'] for e in current_entities}

        to_be_removed, delete_error_count = remove_missing_entities(existing_ids, current_entities_ids,
                                                                    zmon_client, json=args.json)

        # The misspelled 'total_entitites' key is preserved: it is emitted to the
        # tracing backend and consumers may rely on it.
        root_span.log_kv({'total_entitites': str(len(current_entities))})
        root_span.log_kv({'removed_entities': str(len(to_be_removed))})
        logger.info('Found {} removed entities from {} entities ({} failed)'.format(
            len(to_be_removed), len(current_entities), delete_error_count))

        # 5. Get new/updated entities
        new_entities, add_error_count = add_new_entities(current_entities, entities, zmon_client, json=args.json)
        root_span.log_kv({'new_entities': str(len(new_entities))})
        logger.info('Found {} new entities from {} entities ({} failed)'.format(
            len(new_entities), len(current_entities), add_error_count))

        # 6. Always add Local entity
        if not args.json:
            ia_entity['errors'] = {'delete_count': delete_error_count, 'add_count': add_error_count}
            update_local_entity(zmon_client, ia_entity)

        types = {e['type']: len([t for t in new_entities if t['type'] == e['type']]) for e in new_entities}
        for t, v in types.items():
            logger.info('Found {} new entities of type: {}'.format(v, t))

        # Check if it is a dry run!
        if args.json:
            d = {
                'applications': application_entities,
                'apps': apps,
                'dynamodb': dynamodbs,
                'elbs': elbs,
                'elc': elasticaches,
                'rds': rds,
                'certificates': certificates,
                'aws_limits': aws_limits,
                'sqs_queues': sqs,
                'new_entities': new_entities,
                'to_be_removed': to_be_removed,
                # Misspelled key preserved: part of the emitted JSON format.
                'posgresql_clusters': postgresql_clusters
            }
            print(json.dumps(d, indent=4))
def main():
    """Discover AWS entities (including Spotinst elastigroups) and sync them into ZMON.

    Steps: parse args, set up OAuth2 tokens and the tracer, detect the region,
    collect entities from AWS, diff against the ZMON entity service (remove
    stale, add new/updated), and refresh the Local entity.
    """
    argp = argparse.ArgumentParser(description='ZMON AWS Agent')
    argp.add_argument('-e', '--entity-service', dest='entityservice')
    argp.add_argument('-r', '--region', dest='region', default=None)
    argp.add_argument('-j', '--json', dest='json', action='store_true')
    argp.add_argument('-t', '--tracer', dest='tracer', default=os.environ.get('OPENTRACING_TRACER', 'noop'))
    argp.add_argument('--no-oauth2', dest='disable_oauth2', action='store_true', default=False)
    argp.add_argument('--postgresql-user', dest='postgresql_user', default=os.environ.get('AGENT_POSTGRESQL_USER'))
    argp.add_argument('--postgresql-pass', dest='postgresql_pass', default=os.environ.get('AGENT_POSTGRESQL_PASS'))
    args = argp.parse_args()

    if not args.disable_oauth2:
        tokens.configure()
        tokens.manage('uid', ['uid'])
        tokens.start()

    init_opentracing_tracer(args.tracer)
    root_span = opentracing.tracer.start_span(operation_name='aws_entity_discovery')

    with root_span:
        logging.basicConfig(level=logging.INFO)

        # 0. Fetch extra data for entities (EXTRA_ENTITY_FIELDS="k1=v1,k2=v2")
        entity_extras = {}
        for ex in os.getenv('EXTRA_ENTITY_FIELDS', '').split(','):
            if '=' not in ex:
                continue
            k, v = ex.split('=', 1)
            if k and v:
                entity_extras[k] = v

        # 1. Determine region
        if not args.region:
            logger.info('Trying to figure out region..')
            try:
                response = requests.get('http://169.254.169.254/latest/meta-data/placement/availability-zone',
                                        timeout=2)
            except Exception:
                root_span.set_tag('error', True)
                root_span.log_kv({'exception': traceback.format_exc()})
                # BUG FIX: the two concatenated fragments were missing a
                # separating space ("...andcan not...").
                logger.exception('Region was not specified as a parameter and '
                                 'can not be fetched from instance meta-data!')
                raise
            # Availability zone looks like "eu-west-1a" -> strip the zone letter.
            region = response.text[:-1]
        else:
            region = args.region

        root_span.set_tag('region', region)

        logger.info('Using region: {}'.format(region))
        logger.info('Entity service URL: %s', args.entityservice)
        logger.info('Reading DNS data for hosted zones')
        aws.populate_dns_data()

        aws_account_id = aws.get_account_id(region)
        infrastructure_account = 'aws:{}'.format(aws_account_id) if aws_account_id else None

        if not infrastructure_account:
            logger.error('AWS agent: Cannot determine infrastructure account ID. Terminating!')
            return

        root_span.set_tag('account', infrastructure_account)

        # 2. ZMON entities
        # BUG FIX: token was only bound inside the conditional below, so running
        # with --no-oauth2 raised NameError when constructing the client.
        token = None
        if not args.disable_oauth2:
            token = os.getenv('ZMON_TOKEN', None) or tokens.get('uid')
        zmon_client = Zmon(args.entityservice, token=token, user_agent=get_user_agent())

        query = {'infrastructure_account': infrastructure_account, 'region': region, 'created_by': 'agent'}
        entities = zmon_client.get_entities(query)

        # 3. Get running apps
        apps = aws.get_running_apps(region, entities)

        # Pre-initialize every collection so the merge below is safe even when
        # there are no running apps.
        elbs = []
        scaling_groups = []
        elastigroups = []
        certificates = []
        rds = []
        elasticaches = []
        dynamodbs = []
        sqs = []
        postgresql_clusters = []
        aws_limits = []
        new_entities = []
        to_be_removed = []

        if len(apps) > 0:
            elbs = aws.get_running_elbs(region, infrastructure_account)
            scaling_groups = aws.get_auto_scaling_groups(region, infrastructure_account)
            elastigroups = elastigroup.get_elastigroup_entities(region, infrastructure_account)
            rds = aws.get_rds_instances(region, infrastructure_account, entities)
            elasticaches = aws.get_elasticache_nodes(region, infrastructure_account)
            dynamodbs = aws.get_dynamodb_tables(region, infrastructure_account)
            certificates = aws.get_certificates(region, infrastructure_account)
            aws_limits = aws.get_limits(region, infrastructure_account, apps, elbs, entities)
            sqs = aws.get_sqs_queues(region, infrastructure_account, entities)
            postgresql_clusters = postgresql.get_postgresql_clusters(region, infrastructure_account,
                                                                     scaling_groups, apps)

        account_alias = aws.get_account_alias(region)
        ia_entity = {
            'type': 'local',
            'infrastructure_account': infrastructure_account,
            'account_alias': account_alias,
            'region': region,
            'id': 'aws-ac[{}:{}]'.format(infrastructure_account, region),
            'created_by': 'agent',
        }

        # Derive the owning team tag from the account alias (minus optional prefix).
        account_alias_prefix = os.getenv('ACCOUNT_ALIAS_PREFIX', None)
        owner = account_alias
        if account_alias_prefix:
            owner = owner.replace(account_alias_prefix, '', 1)
        root_span.set_tag('team', owner)

        application_entities = aws.get_apps_from_entities(apps, infrastructure_account, region)

        if args.postgresql_user and args.postgresql_pass:
            postgresql_databases = postgresql.get_databases_from_clusters(postgresql_clusters,
                                                                          infrastructure_account, region,
                                                                          args.postgresql_user,
                                                                          args.postgresql_pass)
        else:
            # Pretend the list of DBs is empty, but also make sure we don't remove
            # any pre-existing database entities because we don't know about them.
            postgresql_databases = []
            entities = [e for e in entities if e.get('type') != 'postgresql_database']

        current_entities = (
            elbs + scaling_groups + elastigroups + apps + application_entities + rds +
            postgresql_databases + postgresql_clusters + elasticaches + dynamodbs + certificates + sqs)
        current_entities.append(aws_limits)
        current_entities.append(ia_entity)

        for entity in current_entities:
            entity.update(entity_extras)

        # 4. Remove missing entities
        existing_ids = get_existing_ids(entities)
        current_entities_ids = {e['id'] for e in current_entities}

        to_be_removed, delete_error_count = remove_missing_entities(existing_ids, current_entities_ids,
                                                                    zmon_client, json=args.json)

        # The misspelled 'total_entitites' key is preserved: it is emitted to the
        # tracing backend and consumers may rely on it.
        root_span.log_kv({'total_entitites': str(len(current_entities))})
        root_span.log_kv({'removed_entities': str(len(to_be_removed))})
        logger.info('Found {} removed entities from {} entities ({} failed)'.format(
            len(to_be_removed), len(current_entities), delete_error_count))

        # 5. Get new/updated entities
        new_entities, add_error_count = add_new_entities(current_entities, entities, zmon_client, json=args.json)
        root_span.log_kv({'new_entities': str(len(new_entities))})
        logger.info('Found {} new entities from {} entities ({} failed)'.format(
            len(new_entities), len(current_entities), add_error_count))

        # 6. Always add Local entity
        if not args.json:
            ia_entity['errors'] = {'delete_count': delete_error_count, 'add_count': add_error_count}
            update_local_entity(zmon_client, ia_entity)

        types = {e['type']: len([t for t in new_entities if t['type'] == e['type']]) for e in new_entities}
        for t, v in types.items():
            logger.info('Found {} new entities of type: {}'.format(v, t))

        # Check if it is a dry run!
        if args.json:
            d = {
                'applications': application_entities,
                'apps': apps,
                'elastigroups': elastigroups,
                'dynamodb': dynamodbs,
                'elbs': elbs,
                'elc': elasticaches,
                'rds': rds,
                'certificates': certificates,
                'aws_limits': aws_limits,
                'sqs_queues': sqs,
                'new_entities': new_entities,
                'to_be_removed': to_be_removed,
                # Misspelled key preserved: part of the emitted JSON format.
                'posgresql_clusters': postgresql_clusters
            }
            print(json.dumps(d, indent=4))
def main():
    """Legacy ZMON AWS agent entry point.

    Discovers AWS entities for the current account/region and pushes them to
    the ZMON entity service with plain HTTP requests (no Zmon client class).
    """
    argp = argparse.ArgumentParser(description='ZMon AWS Agent')
    argp.add_argument('-e', '--entity-service', dest='entityservice')
    argp.add_argument('-r', '--region', dest='region', default=None)
    argp.add_argument('-j', '--json', dest='json', action='store_true')
    argp.add_argument('--no-oauth2', dest='disable_oauth2', action='store_true', default=False)
    args = argp.parse_args()

    if not args.disable_oauth2:
        tokens.configure()
        tokens.manage('uid', ['uid'])
        tokens.start()

    logging.basicConfig(level=logging.INFO)

    if not args.region:
        logging.info("Trying to figure out region...")
        try:
            response = requests.get('http://169.254.169.254/latest/meta-data/placement/availability-zone',
                                    timeout=2)
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit pass through.
            logging.error("Region was not specified as a parameter and can not be fetched from instance meta-data!")
            raise
        # Availability zone looks like "eu-west-1a" -> strip the trailing zone letter.
        region = response.text[:-1]
    else:
        region = args.region

    logging.info("Using region: {}".format(region))
    logging.info("Entity service url: %s", args.entityservice)

    apps = get_running_apps(region)

    # BUG FIX: infrastructure_account, elasticaches and dynamodbs were unbound
    # when no apps were running, causing NameError further down (in the JSON
    # dump and in the "if infrastructure_account is not None" check).
    infrastructure_account = None
    if len(apps) > 0:
        infrastructure_account = apps[0]['infrastructure_account']
        elbs = get_running_elbs(region, infrastructure_account)
        scaling_groups = get_auto_scaling_groups(region, infrastructure_account)
        rds = get_rds_instances(region, infrastructure_account)
        elasticaches = get_elasticache_nodes(region, infrastructure_account)
        dynamodbs = get_dynamodb_tables(region, infrastructure_account)
    else:
        elbs = []
        scaling_groups = []
        rds = []
        elasticaches = []
        dynamodbs = []

    if args.json:
        # Dry run: dump everything we discovered and exit.
        d = {'apps': apps, 'elbs': elbs, 'rds': rds, 'elc': elasticaches, 'dynamodb': dynamodbs}
        print(json.dumps(d))
    else:
        if infrastructure_account is not None:
            account_alias = get_account_alias(region)
            ia_entity = {"type": "local",
                         "infrastructure_account": infrastructure_account,
                         "account_alias": account_alias,
                         "region": region,
                         "id": "aws-ac[{}:{}]".format(infrastructure_account, region),
                         "created_by": "agent"}

            application_entities = get_apps_from_entities(apps, infrastructure_account, region)

            # Collect ids of every entity discovered in this run.
            current_entities = []
            for e in elbs:
                current_entities.append(e["id"])
            for e in scaling_groups:
                current_entities.append(e["id"])
            for a in apps:
                current_entities.append(a["id"])
            for a in application_entities:
                current_entities.append(a["id"])
            for a in rds:
                current_entities.append(a["id"])
            for a in elasticaches:
                current_entities.append(a["id"])
            for a in dynamodbs:
                current_entities.append(a["id"])
            current_entities.append(ia_entity["id"])

            headers = {'Content-Type': 'application/json'}
            if not args.disable_oauth2:
                # NOTE(review): os.getenv evaluates its default eagerly, so
                # tokens.get('uid') runs even when ZMON_AGENT_TOKEN is set.
                token = os.getenv('ZMON_AGENT_TOKEN', tokens.get('uid'))
                logging.info("Adding oauth2 token to requests {}...{}".format(token[:1], token[-1:]))
                headers.update({'Authorization': 'Bearer {}'.format(token)})

            # Remove entities this agent created that no longer exist in AWS.
            query = {'infrastructure_account': infrastructure_account, 'region': region, 'created_by': 'agent'}
            r = requests.get(args.entityservice, params={'query': json.dumps(query)}, headers=headers, timeout=10)
            entities = r.json()

            existing_entities = {}
            to_remove = []
            for e in entities:
                existing_entities[e['id']] = e
                if not e["id"] in current_entities:
                    to_remove.append(e["id"])

            if os.getenv('zmon_user'):
                auth = (os.getenv('zmon_user'), os.getenv('zmon_password', ''))
            else:
                auth = None

            for e in to_remove:
                logging.info("removing instance: {}".format(e))
                r = requests.delete(args.entityservice + "{}/".format(e), auth=auth, headers=headers, timeout=3)
                logging.info("...%s", r.status_code)

            def put_entity(entity_type, entity):
                # Upsert one entity; failures are only visible via the logged status code.
                logging.info("Adding {} entity: {}".format(entity_type, entity['id']))
                r = requests.put(args.entityservice, auth=auth,
                                 data=json.dumps(entity, default=json_serial),
                                 headers=headers, timeout=3)
                logging.info("...%s", r.status_code)

            put_entity('LOCAL', ia_entity)

            for instance in apps:
                put_entity('instance', instance)
            for asg in scaling_groups:
                put_entity('Auto Scaling group', asg)
            for elb in elbs:
                put_entity('elastic load balancer', elb)
            for db in rds:
                put_entity('RDS instance', db)

            # merge here or we loose it on next pull
            for app in application_entities:
                if app['id'] in existing_entities:
                    ex = existing_entities[app['id']]
                    if 'scalyr_ts_id' in ex:
                        app['scalyr_ts_id'] = ex['scalyr_ts_id']

            for app in application_entities:
                put_entity('application', app)
            for elasticache in elasticaches:
                put_entity('elasticache', elasticache)
            for dynamodb in dynamodbs:
                put_entity('dynamodb', dynamodb)