def get_databases_from_clusters(pgclusters, infrastructure_account, region, postgresql_user, postgresql_pass):
    """Build one ``postgresql_database`` entity per database found on the given clusters.

    Connects to each cluster's ``postgres`` maintenance database (SSL required) to
    enumerate databases. A failure on one cluster is logged and does not stop the
    discovery of the remaining clusters.
    """
    entities = []
    for pg in pgclusters:
        try:
            dnsname = pg.get('dnsname')
            # Clusters without a DNS name cannot be reached; skip them early.
            if not dnsname:
                continue
            dbnames = list_postgres_databases(host=dnsname,
                                              port=POSTGRESQL_DEFAULT_PORT,
                                              user=postgresql_user,
                                              password=postgresql_pass,
                                              dbname='postgres',
                                              sslmode='require')
            for db in dbnames:
                entities.append({
                    'id': entity_id('{}-{}[{}:{}]'.format(db, dnsname, infrastructure_account, region)),
                    'type': 'postgresql_database',
                    'created_by': 'agent',
                    'infrastructure_account': infrastructure_account,
                    'region': region,
                    'postgresql_cluster': pg.get('id'),
                    'database_name': db,
                    'shards': {
                        db: '{}:{}/{}'.format(dnsname, POSTGRESQL_DEFAULT_PORT, db)
                    }
                })
        except Exception:
            # Best effort: keep going with the other clusters.
            logger.exception('Failed to make Database entities for PostgreSQL clusters on {}!'
                             .format(pg.get('dnsname', '')))
    return entities
def get_databases_from_clusters(pgclusters, infrastructure_account, region, postgresql_user, postgresql_pass):
    """Return ``postgresql_database`` entities for every database on the given clusters.

    Each reachable cluster (identified by its ``dnsname``) is queried through its
    ``postgres`` maintenance database over SSL; per-cluster failures are logged
    and skipped so the remaining clusters are still processed.
    """
    result = []
    for cluster in pgclusters:
        try:
            host = cluster.get('dnsname')
            if host:
                names = list_postgres_databases(host=host,
                                                port=POSTGRESQL_DEFAULT_PORT,
                                                user=postgresql_user,
                                                password=postgresql_pass,
                                                dbname='postgres',
                                                sslmode='require')
                result.extend(
                    {
                        'id': entity_id('{}-{}[{}:{}]'.format(name, host, infrastructure_account, region)),
                        'type': 'postgresql_database',
                        'created_by': 'agent',
                        'infrastructure_account': infrastructure_account,
                        'region': region,
                        'postgresql_cluster': cluster.get('id'),
                        'database_name': name,
                        'shards': {
                            name: '{}:{}/{}'.format(host, POSTGRESQL_DEFAULT_PORT, name)
                        }
                    }
                    for name in names)
        except Exception:
            # Log and continue: one broken cluster must not abort the whole scan.
            logger.exception('Failed to make Database entities for PostgreSQL clusters on {}!'
                             .format(cluster.get('dnsname', '')))
    return result
def test_aws_get_limits(monkeypatch, fail):
    """Check aws.get_limits() in both the happy path and the all-APIs-failing path.

    When ``fail`` is True every mocked AWS client raises, and get_limits() is
    expected to fall back to the previously-known entity passed in as the last
    argument (note 'ec2-max-instances': 40 in the failure expectation).
    """
    # Mocked EC2 client: account attributes carry the max-instances limit.
    ec2 = MagicMock()
    account_attrs = {
        'AccountAttributes': [
            {'AttributeName': 'max-instances', 'AttributeValues': [{'AttributeValue': '30'}]},
            {'AttributeName': 'other', 'AttributeValues': [{'AttributeValue': '20'}]}
        ]
    }
    if not fail:
        ec2.describe_account_attributes.return_value = account_attrs
    else:
        ec2.describe_account_attributes.side_effect = Exception
    # Mocked RDS client: quotas for reserved instances and allocated storage.
    rds = MagicMock()
    account_attrs = {
        'AccountQuotas': [
            {'AccountQuotaName': 'ReservedDBInstances', 'Max': 100, 'Used': 10},
            {'AccountQuotaName': 'AllocatedStorage', 'Max': 100, 'Used': 10},
            {'AccountQuotaName': 'other', 'Max': 100, 'Used': 10},
        ]
    }
    if not fail:
        rds.describe_account_attributes.return_value = account_attrs
    else:
        rds.describe_account_attributes.side_effect = Exception
    # Mocked autoscaling client: group / launch-configuration limits.
    asg = MagicMock()
    account_limits = {
        'MaxNumberOfAutoScalingGroups': 100,
        'MaxNumberOfLaunchConfigurations': 100,
        'NumberOfAutoScalingGroups': 20,
        'NumberOfLaunchConfigurations': 20,
    }
    if not fail:
        asg.describe_account_limits.return_value = account_limits
    else:
        asg.describe_account_limits.side_effect = Exception
    # Mocked IAM client: server certificates, instance profiles and policies.
    iam = MagicMock()
    account_summary = {
        'SummaryMap': {
            'ServerCertificates': 1,
            'ServerCertificatesQuota': 20,
            'InstanceProfiles': 10,
            'InstanceProfilesQuota': 20,
            'Policies': 2,
            'PoliciesQuota': 10,
        }
    }
    if not fail:
        iam.get_account_summary.return_value = account_summary
    else:
        iam.get_account_summary.side_effect = Exception
    # Patch boto3.client so each service name returns the matching mock above.
    boto = get_boto_client(monkeypatch, ec2, rds, asg, iam)
    # Three instances (one spot) plus a non-instance entity that must be ignored.
    apps = [
        {'type': 'instance'},
        {'type': 'instance'},
        {'type': 'instance', 'spot_instance': True},
        {'type': 'cassandra'}
    ]
    elbs = [{'type': 'elb'}, {'type': 'elb'}, {'type': 'elb'}]
    # The last argument is the previously-discovered limits entity (used as a
    # fallback when the AWS APIs fail).
    limits = aws.get_limits(REGION, ACCOUNT, apps, elbs, [{
        'id': aws.entity_id('aws-limits[{}:{}]'.format(ACCOUNT, REGION)),
        'type': 'aws_limits',
        'ec2-max-instances': 40,
    }])
    if not fail:
        expected = {
            'asg-max-groups': 100,
            'asg-max-launch-configurations': 100,
            'asg-used-groups': 20,
            'asg-used-launch-configurations': 20,
            'created_by': 'agent',
            'ec2-max-instances': 30,
            'ec2-used-instances': 2,
            'ec2-max-spot-instances': 20,
            'ec2-used-spot-instances': 1,
            'elb-max-count': 20,
            'elb-used-count': 3,
            'iam-max-instance-profiles': 20,
            'iam-max-policies': 10,
            'iam-max-server-certificates': 20,
            'iam-used-instance-profiles': 10,
            'iam-used-policies': 2,
            'iam-used-server-certificates': 1,
            'id': 'aws-limits[aws:1234:eu-central-1]',
            'infrastructure_account': 'aws:1234',
            'rds-max-allocated': 100,
            'rds-max-reserved': 100,
            'rds-used-allocated': 10,
            'rds-used-reserved': 10,
            'region': 'eu-central-1',
            'type': 'aws_limits'
        }
    else:
        # On API failure only the locally computed usage counters and the
        # carried-over ec2-max-instances (40) survive.
        expected = {
            'created_by': 'agent',
            'ec2-max-instances': 40,
            'ec2-used-instances': 2,
            'ec2-max-spot-instances': 20,
            'ec2-used-spot-instances': 1,
            'elb-max-count': 20,
            'elb-used-count': 3,
            'id': 'aws-limits[aws:1234:eu-central-1]',
            'infrastructure_account': 'aws:1234',
            'region': 'eu-central-1',
            'type': 'aws_limits'
        }
    assert expected == limits
    # All four service clients must have been requested for the given region.
    calls = [
        call('ec2', region_name=REGION),
        call('rds', region_name=REGION),
        call('autoscaling', region_name=REGION),
        call('iam', region_name=REGION),
    ]
    boto.assert_has_calls(calls)
def test_entity_id(monkeypatch, inp, out):
    """Parametrized check: aws.entity_id() maps the raw name *inp* to *out*."""
    normalized = aws.entity_id(inp)
    assert normalized == out
def get_postgresql_clusters(region, infrastructure_account, asgs, insts):
    """Detect Spilo PostgreSQL clusters and return ``postgresql_cluster`` entities.

    The Spilo ASGs act as the skeleton: for each one we resolve its EC2
    instances, any attached Elastic IP, the DNS record of the public IP, and a
    default ``postgres`` shard. Returns ``[]`` if the initial AWS collection
    (or the launch-configuration fallback) fails.
    """
    entities = []
    try:
        addresses = collect_eip_addresses(infrastructure_account, region)
        spilo_asgs = filter_asgs(infrastructure_account, asgs)
        instances = filter_instances(infrastructure_account, insts)
        dns_records = collect_recordsets(infrastructure_account, region)
    except Exception:
        logger.exception(
            'Failed to collect the AWS objects for PostgreSQL cluster detection'
        )
        return []
    launch_configs = []
    # we will use the ASGs as a skeleton for building the entities
    for cluster in spilo_asgs:
        cluster_name = cluster['spilo_cluster']
        cluster_instances = []
        eip = []
        public_ip_instance_id = ''
        allocation_error = ''
        public_ip = ''
        for i in cluster['instances']:
            instance_id = i['aws_id']
            try:
                i_data = [
                    inst for inst in instances if inst['aws_id'] == instance_id
                ][0]
            except IndexError:
                logger.exception('Failed to find a Spilo EC2 instance: %s',
                                 instance_id)
                # BUG FIX: previously execution fell through and used an
                # unbound (or stale, previous-iteration) i_data, raising
                # NameError or recording wrong data. Skip this instance.
                continue
            private_ip = i_data['ip']
            role = i_data.get('role', '')
            cluster_instances.append({
                'instance_id': instance_id,
                'private_ip': private_ip,
                'role': role
            })
            address = [
                a for a in addresses if a.get('InstanceId') == instance_id
            ]
            if address:
                eip.append(address[0]
                           )  # we currently expect only one EIP per instance
        if len(eip) > 1:
            pass  # in the future, this might be a valid case, when replicas also get public IPs
        elif not eip:
            # in this case we have to look at the cluster definition, to see if there was an EIP assigned,
            # but for some reason currently is not.
            # this is so for reducing boto3 call numbers
            try:
                if not launch_configs:
                    launch_configs = collect_launch_configurations(
                        infrastructure_account, region)
                eip_allocation = extract_eipalloc_from_lc(
                    launch_configs, cluster_name)
                if eip_allocation:
                    address = [
                        a for a in addresses
                        if a.get('AllocationId') == eip_allocation
                    ]
                    if address:
                        public_ip = address[0]['PublicIp']
                        allocation_error = 'There is a public IP defined but not attached to any instance'
            except Exception:
                logger.exception('Failed to collect launch configurations')
                return []
        else:
            public_ip = eip[0]['PublicIp']
            public_ip_instance_id = eip[0]['InstanceId']
        dnsname = dns_records.get(public_ip, '')
        entities.append({
            'type': 'postgresql_cluster',
            'id': entity_id('pg-{}[{}:{}]'.format(cluster_name,
                                                  infrastructure_account,
                                                  region)),
            'region': region,
            'spilo_cluster': cluster_name,
            'elastic_ip': public_ip,
            'elastic_ip_instance_id': public_ip_instance_id,
            'allocation_error': allocation_error,
            'instances': cluster_instances,
            'infrastructure_account': infrastructure_account,
            'dnsname': dnsname,
            'shards': {
                'postgres': '{}:5432/postgres'.format(dnsname)
            }
        })
    return entities
def get_elastigroup_entities(region, acc, **kwargs):
    """Discover Spotinst Elastigroups referenced from CloudFormation stacks.

    Walks every stack in the region, resolves each Elastigroup's details and
    instances via the Spotinst API, and returns a list of 'elastigroup'
    entities. An AccessDenied ClientError is logged as a warning and skipped;
    any other failure is logged with a full traceback. Tracing tags are set on
    the current span in both cases.
    """
    groups = []
    current_span = extract_span_from_kwargs(**kwargs)
    current_span.set_tag("aws_region", region)
    current_span.set_tag("account_id", acc)
    try:
        cf = boto3.client('cloudformation', region_name=region)
        stack_names = get_all_stack_names(cf)
        for stack_name in stack_names:
            elastigroups = get_elastigroup_resources(cf, stack_name)
            for eg_data in elastigroups:
                eg_details = get_elastigroup(eg_data, **kwargs)
                eg_name = eg_details.get(
                    'name', eg_details.get('id', 'unknown-elastigroup'))
                capacity = eg_details.get('capacity', {})
                strategy = eg_details.get('strategy', {})
                compute = eg_details.get('compute', {})
                eg = {
                    'id': entity_id('elastigroup-{}[{}:{}]'.format(
                        eg_name, acc, region)),
                    'type': 'elastigroup',
                    'infrastructure_account': acc,
                    'region': region,
                    'created_by': 'agent',
                    'name': eg_name,
                    'availability_zones': [
                        az.get('name', 'unknown-az') for az in eg_details.get(
                            'compute', {}).get('availability_zones', [])
                    ],
                    'desired_capacity': capacity.get('target', 1),
                    'max_size': capacity.get('maximum', 1),
                    'min_size': capacity.get('minimum', 1),
                    'cloud_account_id': eg_data.account_id,
                    'elastigroup_id': eg_data.group_id,
                    'risk': strategy.get('risk', 100),
                    'orientation': strategy.get('availability_vs_cost',
                                                'balanced'),
                    'instance_types': compute.get('instance_types', None),
                    'created_time': eg_details.get('created_at', None),
                }
                # Copy launch-spec tags into the entity without clobbering the
                # fields set above.
                for tag in compute.get('launch_specification', {}).get('tags', []):
                    raw_key = tag.get('tag_key')
                    val = tag.get('tag_value')
                    # BUG FIX: inflection.underscore() raises on None, which
                    # aborted discovery of all remaining stacks via the outer
                    # except; skip incomplete tags before converting the key.
                    if not raw_key or not val:
                        continue
                    key = inflection.underscore(raw_key)
                    if key and key not in eg:
                        eg[key] = val
                add_traffic_tags_to_entity(eg)
                eg['instances'] = []
                instances = get_elastigroup_instances(eg_data)
                for instance in instances:
                    eg['instances'].append(extract_instance_details(instance))
                groups.append(eg)
    except Exception as e:
        current_span.set_tag('error', True)
        current_span.log_kv({'exception': traceback.format_exc()})
        if isinstance(
                e, ClientError) and e.response['Error']['Code'] == 'AccessDenied':
            msg = 'Access to required AWS API denied. Skipping Elastigroup discovery.'
            logger.warning(msg)
            current_span.log_kv({'message': msg})
            current_span.set_tag('permission_error', True)
        else:
            logger.exception('Failed to discover Elastigroups')
    return groups
def get_postgresql_clusters(region, infrastructure_account, asgs, insts):
    """Detect Spilo PostgreSQL clusters and return ``postgresql_cluster`` entities.

    Uses the Spilo ASGs as the skeleton, attaching resolved EC2 instances, any
    Elastic IP (or the allocation declared in the launch configuration when it
    is not attached), the DNS name of the public IP, and a default ``postgres``
    shard. Returns ``[]`` when the AWS object collection fails.
    """
    entities = []
    try:
        addresses = collect_eip_addresses(infrastructure_account, region)
        spilo_asgs = filter_asgs(infrastructure_account, asgs)
        instances = filter_instances(infrastructure_account, insts)
        dns_records = collect_recordsets(infrastructure_account, region)
    except Exception:
        logger.exception('Failed to collect the AWS objects for PostgreSQL cluster detection')
        return []
    launch_configs = []
    # we will use the ASGs as a skeleton for building the entities
    for cluster in spilo_asgs:
        cluster_name = cluster['spilo_cluster']
        cluster_instances = []
        eip = []
        public_ip_instance_id = ''
        allocation_error = ''
        public_ip = ''
        for i in cluster['instances']:
            instance_id = i['aws_id']
            try:
                i_data = [inst for inst in instances if inst['aws_id'] == instance_id][0]
            except IndexError:
                logger.exception('Failed to find a Spilo EC2 instance: %s', instance_id)
                # BUG FIX: without this continue, the code below used an
                # unbound (or previous iteration's) i_data — NameError or
                # silently wrong instance data. Skip unresolved instances.
                continue
            private_ip = i_data['ip']
            role = i_data.get('role', '')
            cluster_instances.append({'instance_id': instance_id,
                                      'private_ip': private_ip,
                                      'role': role})
            address = [a for a in addresses if a.get('InstanceId') == instance_id]
            if address:
                eip.append(address[0])  # we currently expect only one EIP per instance
        if len(eip) > 1:
            pass  # in the future, this might be a valid case, when replicas also get public IPs
        elif not eip:
            # in this case we have to look at the cluster definition, to see if there was an EIP assigned,
            # but for some reason currently is not.
            # this is so for reducing boto3 call numbers
            try:
                if not launch_configs:
                    launch_configs = collect_launch_configurations(infrastructure_account, region)
                eip_allocation = extract_eipalloc_from_lc(launch_configs, cluster_name)
                if eip_allocation:
                    address = [a for a in addresses if a.get('AllocationId') == eip_allocation]
                    if address:
                        public_ip = address[0]['PublicIp']
                        allocation_error = 'There is a public IP defined but not attached to any instance'
            except Exception:
                logger.exception('Failed to collect launch configurations')
                return []
        else:
            public_ip = eip[0]['PublicIp']
            public_ip_instance_id = eip[0]['InstanceId']
        dnsname = dns_records.get(public_ip, '')
        entities.append({'type': 'postgresql_cluster',
                         'id': entity_id('pg-{}[{}:{}]'.format(cluster_name, infrastructure_account, region)),
                         'region': region,
                         'spilo_cluster': cluster_name,
                         'elastic_ip': public_ip,
                         'elastic_ip_instance_id': public_ip_instance_id,
                         'allocation_error': allocation_error,
                         'instances': cluster_instances,
                         'infrastructure_account': infrastructure_account,
                         'dnsname': dnsname,
                         'shards': {'postgres': '{}:5432/postgres'.format(dnsname)}})
    return entities