def ec2_summary(aws_config=None):
    """Collect and output a summary count of EC2-related resources.

    @type aws_config: Config
    """
    elb_client = get_client(client_type='elb', config=aws_config)
    ec2_client = get_client(client_type='ec2', config=aws_config)
    elbs = len(
        elb_client.describe_load_balancers().get('LoadBalancerDescriptions'))
    instances = list()
    for reservation in ec2_client.describe_instances().get('Reservations', []):
        instances.extend(reservation.get('Instances'))
    amis = len(ec2_client.describe_images(Owners=['self']).get('Images'))
    # BUG FIX: the default must be a sequence, not 0 -- len(0) raises
    # TypeError if the 'SecurityGroups' key is absent.
    secgroups = len(ec2_client.describe_security_groups().get(
        'SecurityGroups', []))
    addresses = ec2_client.describe_addresses()['Addresses']
    # Simplified from len([x for x, _ in enumerate(addresses)]).
    eips = len(addresses)
    volumes = ec2_client.describe_volumes().get('Volumes')
    summary = {
        'instances': instances,
        'elbs': elbs,
        'eips': eips,
        'amis': amis,
        'secgroups': secgroups,
        'volumes': volumes
    }
    output_ec2_summary(summary=summary)
    exit(0)
def delete_orphaned_snapshots(aws_config=None, noop=False):
    """Delete EBS snapshots created by CreateImage whose AMI no longer exists.

    @type aws_config: Config
    @type noop: bool  # when True, only list the orphaned snapshots
    """
    account_id = None
    try:
        iam_client = get_client(client_type='iam', config=aws_config)
        users = iam_client.list_users(MaxItems=1)
        if users.get('Users'):
            # Account id is field 5 of a user ARN:
            # arn:aws:iam::<account-id>:user/<name>
            account_id = users.get('Users')[0]['Arn'].split(':')[4]
    except (ClientError, IndexError, KeyError):
        # FIX: narrowed from a bare except, which also swallowed
        # KeyboardInterrupt/SystemExit.
        exit('Unable to get account details. Please check your permissions.')
    if account_id:
        ec2_client = get_client(client_type='ec2', config=aws_config)
        snapshots = ec2_client.describe_snapshots(
            OwnerIds=[account_id]).get('Snapshots')
        images = ec2_client.describe_images()['Images']
        image_id_list = [image['ImageId'] for image in images]
        orphaned_snapshots = list()
        for snapshot in snapshots:
            # FIX: guard against snapshots with no Description at all
            # (None.startswith would raise AttributeError).
            snapshot_description = snapshot.get('Description') or ''
            if snapshot_description.startswith('Created by CreateImage'):
                ami_string_start = snapshot_description.find('ami-')
                ami_string_end = snapshot_description.find(
                    ' ', ami_string_start)
                ami_id = snapshot_description[ami_string_start:ami_string_end]
                if ami_id not in image_id_list:
                    orphaned_snapshots.append(snapshot)
        if orphaned_snapshots:
            if not noop:
                print("Total snapshots: {}".format(len(snapshots)))
                print("Orphaned snapshots: {}".format(len(orphaned_snapshots)))
                total_deleted = 0
                try:
                    for index, orphaned_snapshot in enumerate(
                            orphaned_snapshots, start=1):
                        ec2_client.delete_snapshot(
                            SnapshotId=orphaned_snapshot.get('SnapshotId'))
                        total_deleted = index
                except ClientError:
                    # FIX: narrowed from a bare except.
                    print('An error occurred whilst deleting snapshots.')
                exit('Deleted {0} out of a possible {1}.'.format(
                    total_deleted, len(orphaned_snapshots)))
            else:
                output_snapshot_list(snapshots=orphaned_snapshots)
        else:
            print('No orphaned snapshots were found.')
def delete_orphaned_snapshots(aws_config=None, noop=False):
    """Delete EBS snapshots created by CreateImage whose AMI no longer exists.

    @type aws_config: Config
    @type noop: bool  # when True, only list the orphaned snapshots
    """
    account_id = None
    try:
        iam_client = get_client(client_type='iam', config=aws_config)
        users = iam_client.list_users(MaxItems=1)
        if users.get('Users'):
            # Account id is field 5 of a user ARN.
            account_id = users.get('Users')[0]['Arn'].split(':')[4]
    except (ClientError, IndexError, KeyError):
        # FIX: narrowed from a bare except clause.
        exit('Unable to get account details. Please check your permissions.')
    if account_id:
        ec2_client = get_client(client_type='ec2', config=aws_config)
        snapshots = ec2_client.describe_snapshots(
            OwnerIds=[account_id]).get('Snapshots')
        images = ec2_client.describe_images()['Images']
        image_id_list = [image['ImageId'] for image in images]
        orphaned_snapshots = list()
        for snapshot in snapshots:
            # FIX: snapshots may lack a Description; default to ''.
            snapshot_description = snapshot.get('Description') or ''
            if snapshot_description.startswith('Created by CreateImage'):
                ami_string_start = snapshot_description.find('ami-')
                ami_string_end = snapshot_description.find(
                    ' ', ami_string_start)
                ami_id = snapshot_description[ami_string_start:ami_string_end]
                if ami_id not in image_id_list:
                    orphaned_snapshots.append(snapshot)
        if orphaned_snapshots:
            if not noop:
                print("Total snapshots: {}".format(len(snapshots)))
                print("Orphaned snapshots: {}".format(len(orphaned_snapshots)))
                total_deleted = 0
                try:
                    for index, orphaned_snapshot in enumerate(
                            orphaned_snapshots, start=1):
                        ec2_client.delete_snapshot(
                            SnapshotId=orphaned_snapshot.get('SnapshotId'))
                        total_deleted = index
                except ClientError:
                    # FIX: narrowed from a bare except clause.
                    print('An error occurred whilst deleting snapshots.')
                exit('Deleted {0} out of a possible {1}.'.format(
                    total_deleted, len(orphaned_snapshots)))
            else:
                output_snapshot_list(snapshots=orphaned_snapshots)
        else:
            print('No orphaned snapshots were found.')
def ec2_manage(aws_config=None, instance_id=None, action=None):
    """Apply a lifecycle action (stop/start/reboot/terminate) to an instance.

    @type aws_config: Config
    @type instance_id: unicode
    @type action: unicode
    """
    ec2_client = get_client(client_type='ec2', config=aws_config)
    reservations = ec2_client.describe_instances(InstanceIds=[instance_id])
    instance = reservations.get('Reservations')[0].get('Instances')[0]
    # Dispatch table replaces the original if/elif chain; unknown actions
    # fall through and do nothing, exactly as before.
    handlers = {
        'stop': ec2_instance_stop,
        'start': ec2_instance_start,
        'reboot': ec2_instance_reboot,
        'terminate': ec2_instance_terminate,
    }
    try:
        instance_id = instance.get('InstanceId')
        instance_state = instance['State']['Name']
        handler = handlers.get(action)
        if instance_id and handler is not None:
            handler(instance_id=instance_id,
                    instance_state=instance_state,
                    ec2_client=ec2_client)
    except AttributeError:
        exit("Cannot find instance: {0}".format(instance_id))
def s3_rm(aws_config=None, item=None):
    """Remove a single key from an S3 bucket.

    @type aws_config: Config
    @type item: unicode  # 'bucket/key/path' form
    """
    s3_client = get_client(client_type='s3', config=aws_config)
    prefix = ''
    bucket_name = ''
    if item and '/' in item:
        # Split on the first '/': bucket before it, key after it.
        bucket_name, _, prefix = item.partition('/')
        if prefix.endswith('/'):
            exit('Only keys can currently be removed.')
        check_bucket_accessible(s3_client=s3_client, bucket_name=bucket_name)
    else:
        exit('Invalid key.')
    try:
        s3_client.head_object(Bucket=bucket_name, Key=prefix)
    except ClientError:
        exit('Cannot access \'{0}\'.'.format(item))
    try:
        s3_client.delete_objects(Bucket=bucket_name,
                                 Delete={'Objects': [{'Key': prefix}, ]})
        exit('\'{0}\' deleted.'.format(item))
    except ClientError as error:
        error_code = error.response['Error']['Code']
        if 'NoSuchBucket' in error_code:
            exit('Bucket not found.')
        elif 'NoSuchKey' in error_code:
            exit('Key not found.')
        else:
            exit('Unhandled error: {0}'.format(error_code))
def get_elb_list(aws_config):
    """Return all classic load balancer descriptions.

    @type aws_config: Config
    """
    client = get_client(client_type='elb', config=aws_config)
    return client.describe_load_balancers().get('LoadBalancerDescriptions')
def s3_info(aws_config=None, item=None):
    """Show info for an S3 bucket or key, or the account owner when no item.

    @type aws_config: Config
    @type item: unicode
    """
    s3_client = get_client(client_type='s3', config=aws_config)
    prefix = ''
    if item and '/' in item:
        bucket_name, _, prefix = item.partition('/')
        # Drop a single trailing slash so "folder" paths resolve as keys.
        if prefix.endswith('/'):
            prefix = prefix[:-1]
    else:
        bucket_name = item
    owner = s3_client.list_buckets().get('Owner')
    try:
        if bucket_name:
            check_bucket_accessible(s3_client=s3_client,
                                    bucket_name=bucket_name)
            s3_object = s3_client.get_object(Bucket=bucket_name, Key=prefix)
            output_s3_info(s3_object=s3_object, key=prefix, bucket=bucket_name)
        else:
            output_s3_info(owner=owner)
    except ClientError as ce:
        code = ce.response['Error']['Code']
        if 'NoSuchBucket' in code:
            exit('Bucket not found.')
        elif 'NoSuchKey' in code:
            exit('Key not found.')
        else:
            exit('Unhandled error: {0}'.format(code))
def iam_user_info(aws_config=None, username=None):
    """Output access keys, policies, groups and MFA devices for one IAM user.

    @type aws_config: Config
    @type username: str
    """
    iam_client = get_client(client_type='iam', config=aws_config)
    try:
        user = iam_client.get_user(UserName=username)['User']
        user_access_keys = iam_client.list_access_keys(
            UserName=username).get('AccessKeyMetadata')
        user_policies = iam_client.list_user_policies(
            UserName=username).get('PolicyNames')
        user_groups = iam_client.list_groups_for_user(
            UserName=username).get('Groups')
        group_names = [group['GroupName'] for group in user_groups]
        # Virtual MFA devices are listed account-wide; keep only this user's.
        mfa_devices = iam_client.list_virtual_mfa_devices(
            AssignmentStatus='Assigned').get('VirtualMFADevices')
        user_mfa_devices = [device for device in mfa_devices
                            if device['User']['UserName'] == username]
        output_iam_user_info(user=user,
                             user_mfa_devices=user_mfa_devices,
                             user_access_keys=user_access_keys,
                             user_policies=user_policies,
                             user_groups=group_names)
    except (ClientError, IndexError):
        exit("Cannot find user: {0}".format(username))
def secgroup_list(aws_config=None):
    """List all EC2 security groups.

    @type aws_config: Config
    """
    client = get_client(client_type='ec2', config=aws_config)
    output_secgroup_list(secgroups=client.describe_security_groups())
def get_elb_list(aws_config):
    """Fetch the full list of classic load balancer descriptions.

    @type aws_config: Config
    """
    elb_client = get_client(client_type="elb", config=aws_config)
    response = elb_client.describe_load_balancers()
    return response.get("LoadBalancerDescriptions")
def summary(aws_config=None):
    """Output the IAM account summary map.

    @type aws_config: Config
    """
    iam_client = get_client(client_type='iam', config=aws_config)
    account_summary = iam_client.get_account_summary()
    output_iam_summary(summary_map=account_summary['SummaryMap'])
def vpc_list(aws_config=None):
    """List all VPCs, or exit with a message when none exist.

    @type aws_config: Config
    """
    ec2_client = get_client(client_type='ec2', config=aws_config)
    vpcs = ec2_client.describe_vpcs()
    if not vpcs.get('Vpcs'):
        exit("No VPCs found.")
    output_vpc_list(vpcs=vpcs)
def eip_list(aws_config=None):
    """List elastic IP addresses.

    @type aws_config: Config
    """
    ec2_client = get_client(client_type='ec2', config=aws_config)
    addresses = ec2_client.describe_addresses().get('Addresses')
    if addresses:
        output_eip_list(addresses=addresses)
    else:
        # BUG FIX: this exit() previously ran unconditionally, so the
        # 'No elastic IPs found.' message was printed (and a non-zero exit
        # code returned) even after a successful listing.
        exit('No elastic IPs found.')
def route53_list(aws_config=None):
    """List Route53 hosted zones.

    @type aws_config: Config
    """
    route53_client = get_client(client_type='route53', config=aws_config)
    zones = route53_client.list_hosted_zones()
    if zones.get('HostedZones'):
        # FIX: reuse the response already fetched -- the original issued a
        # second, redundant list_hosted_zones API call here.
        output_route53_list(zones=zones)
    else:
        exit("No hosted zones found.")
def fake_vpcs():
    """VPC mock service.

    Generator fixture: starts a moto EC2 mock, creates one VPC with a
    subnet, yields the describe_vpcs() response, then stops the mock.
    """
    mock = mock_ec2()
    mock.start()
    try:
        ec2_client = get_client(client_type='ec2', config=config)
        vpc = ec2_client.create_vpc(CidrBlock='10.0.0.0/16')
        # FIX: removed leftover debug print(vpc).
        ec2_client.create_subnet(VpcId=vpc['Vpc']['VpcId'],
                                 CidrBlock="10.0.0.0/18")
        yield ec2_client.describe_vpcs()
    finally:
        # FIX: ensure the mock is stopped even if the consuming test raises.
        mock.stop()
def lc_list(aws_config=None):
    """List all launch configurations, or exit when there are none.

    @type aws_config: Config
    """
    asg_client = get_client(client_type='autoscaling', config=aws_config)
    response = asg_client.describe_launch_configurations()
    launch_configs = response.get('LaunchConfigurations')
    if not launch_configs:
        exit("No launch configurations were found.")
    output_lc_list(lc_list=launch_configs)
def asg_list(aws_config=None):
    """List all auto scaling groups, or exit when there are none.

    @type aws_config: Config
    """
    asg_client = get_client(client_type='autoscaling', config=aws_config)
    response = asg_client.describe_auto_scaling_groups()
    groups = response.get('AutoScalingGroups')
    if not groups:
        exit("No auto scaling groups were found.")
    output_asg_list(asg_list=groups)
def es_list(aws_config=None):
    """List Elasticsearch domain names.

    @type aws_config: Config
    """
    es_client = get_client(client_type='es', config=aws_config)
    domain_names = es_client.list_domain_names().get('DomainNames')
    if not domain_names:
        exit("No domains found.")
    output_domain_list(domains=domain_names)
def asg_list(aws_config=None):
    """Show every auto scaling group in the account.

    @type aws_config: Config
    """
    client = get_client(client_type='autoscaling', config=aws_config)
    groups = client.describe_auto_scaling_groups().get('AutoScalingGroups')
    if groups:
        output_asg_list(asg_list=groups)
    else:
        exit("No auto scaling groups were found.")
def lc_list(aws_config=None):
    """Show every launch configuration in the account.

    @type aws_config: Config
    """
    client = get_client(client_type='autoscaling', config=aws_config)
    configs = client.describe_launch_configurations().get(
        'LaunchConfigurations')
    if configs:
        output_lc_list(lc_list=configs)
    else:
        exit("No launch configurations were found.")
def asg_info(aws_config=None, asg_name=None):
    """Output details for one auto scaling group.

    @type aws_config: Config
    @type asg_name: unicode
    """
    asg_client = get_client(client_type='autoscaling', config=aws_config)
    response = asg_client.describe_auto_scaling_groups(
        AutoScalingGroupNames=[asg_name])
    groups = response.get('AutoScalingGroups')
    if not groups:
        exit("Auto Scaling Group: {0} not found.".format(asg_name))
    output_asg_info(asg=groups[0])
def efs_list(aws_config=None):
    """List EFS file systems, or exit when there are none.

    @type aws_config: Config
    """
    efs_client = get_client(client_type='efs', config=aws_config)
    filesystems = efs_client.describe_file_systems().get('FileSystems')
    if not filesystems:
        exit("No file systems found.")
    output_filesystems(filesystems=filesystems)
def account_info(aws_config):
    """Output the AWS account id and any account aliases.

    @type aws_config: Config
    """
    iam_client = get_client(client_type='iam', config=aws_config)
    users = iam_client.list_users(MaxItems=1).get('Users')
    if users:
        # Account id is field 5 of the ARN: arn:aws:iam::<id>:user/<name>
        account_id = users[0]['Arn'].split(':')[4]
        aliases = iam_client.list_account_aliases().get('AccountAliases')
        output_account_info(account_id=account_id, account_aliases=aliases)
def iam_user_list(aws_config=None):
    """List IAM users together with their assigned virtual MFA devices.

    @type aws_config: Config
    """
    iam_client = get_client(client_type='iam', config=aws_config)
    users = iam_client.list_users().get('Users')
    mfa_devices = iam_client.list_virtual_mfa_devices(
        AssignmentStatus='Assigned').get('VirtualMFADevices')
    if users:
        output_iam_user_list(users=users, mfa_devices=mfa_devices)
    else:
        # BUG FIX: exit() previously ran unconditionally, printing
        # 'No users found.' even when users had just been listed.
        exit('No users found.')
def asg_info(aws_config=None, asg_name=None):
    """Display one auto scaling group by name.

    @type aws_config: Config
    @type asg_name: unicode
    """
    client = get_client(client_type='autoscaling', config=aws_config)
    result = client.describe_auto_scaling_groups(
        AutoScalingGroupNames=[asg_name])
    matched = result.get('AutoScalingGroups')
    if matched:
        output_asg_info(asg=matched[0])
    else:
        exit("Auto Scaling Group: {0} not found.".format(asg_name))
def vpc_info(aws_config=None, vpc_id=None):
    """Output one VPC and the subnets that belong to it.

    @type aws_config: Config
    @type vpc_id: unicode
    """
    ec2_client = get_client(client_type='ec2', config=aws_config)
    try:
        vpcs = ec2_client.describe_vpcs(VpcIds=[vpc_id])
        subnet_filter = [{'Name': 'vpc-id', 'Values': [vpc_id]}]
        all_subnets = ec2_client.describe_subnets(Filters=subnet_filter)
        output_vpc_info(vpc=vpcs['Vpcs'][0], subnets=all_subnets)
    except (ClientError, IndexError):
        exit("Cannot find VPC: {0}".format(vpc_id))
def es_info(aws_config=None, domain_name=None):
    """Output details for one Elasticsearch domain.

    @type aws_config: Config
    @type domain_name: unicode
    """
    es_client = get_client(client_type='es', config=aws_config)
    try:
        result = es_client.describe_elasticsearch_domains(
            DomainNames=[domain_name])
        output_domain_info(domain=result)
    except (ClientError, IndexError):
        exit("Cannot find domain: {0}".format(domain_name))
def ec2_summary(aws_config=None):
    """Collect and output a summary count of EC2-related resources.

    @type aws_config: Config
    """
    elb_client = get_client(client_type='elb', config=aws_config)
    ec2_client = get_client(client_type='ec2', config=aws_config)
    elbs = len(
        elb_client.describe_load_balancers().get('LoadBalancerDescriptions'))
    instances = list()
    for reservation in ec2_client.describe_instances().get('Reservations', []):
        instances.extend(reservation.get('Instances'))
    amis = len(ec2_client.describe_images(Owners=['self']).get('Images'))
    # BUG FIX: default must be a sequence, not 0 -- len(0) would raise
    # TypeError when 'SecurityGroups' is missing from the response.
    secgroups = len(ec2_client.describe_security_groups().get(
        'SecurityGroups', []))
    addresses = ec2_client.describe_addresses()['Addresses']
    # Simplified from len([x for x, _ in enumerate(addresses)]).
    eips = len(addresses)
    volumes = ec2_client.describe_volumes().get('Volumes')
    summary = {
        'instances': instances,
        'elbs': elbs,
        'eips': eips,
        'amis': amis,
        'secgroups': secgroups,
        'volumes': volumes
    }
    output_ec2_summary(summary=summary)
    exit(0)
def ami_info(aws_config=None, ami_id=None):
    """Output details and launch permissions for one AMI.

    @type aws_config: Config
    @type ami_id: unicode
    """
    ec2_client = get_client(client_type='ec2', config=aws_config)
    try:
        ami = ec2_client.describe_images(ImageIds=[ami_id]).get('Images')[0]
        launch_permissions = ec2_client.describe_image_attribute(
            ImageId=ami_id, Attribute='launchPermission')
        output_ami_info(ami=ami, launch_permissions=launch_permissions)
    except ClientError:
        exit('Unable to find ami: {0}'.format(ami_id))
def lc_info(aws_config=None, lc_name=None):
    """Output details for one launch configuration.

    @type aws_config: Config
    @type lc_name: unicode
    """
    asg_client = get_client(client_type='autoscaling', config=aws_config)
    response = asg_client.describe_launch_configurations(
        LaunchConfigurationNames=[lc_name])
    lc_details = response.get('LaunchConfigurations')
    if not lc_details:
        exit("Launch Configuration: {0} not found.".format(lc_name))
    output_lc_info(lc=lc_details[0])
def ec2_get_instance_vols(aws_config=None, instance_id=None):
    """Return the block device mappings attached to an instance.

    @type aws_config: Config
    @type instance_id: unicode
    @rtype: list
    """
    ec2_client = get_client(client_type='ec2', config=aws_config)
    reservations = ec2_client.describe_instances(InstanceIds=[instance_id])
    instance = reservations.get('Reservations')[0].get('Instances')[0]
    # FIX: default to an empty list -- the original for-loop raised
    # TypeError when 'BlockDeviceMappings' was absent (None).
    return list(instance.get('BlockDeviceMappings', []))
def ami_info(aws_config=None, ami_id=None):
    """Display one AMI together with its launch permissions.

    @type aws_config: Config
    @type ami_id: unicode
    """
    ec2_client = get_client(client_type='ec2', config=aws_config)
    try:
        images = ec2_client.describe_images(ImageIds=[ami_id]).get('Images')
        permissions = ec2_client.describe_image_attribute(
            ImageId=ami_id, Attribute='launchPermission')
        output_ami_info(ami=images[0], launch_permissions=permissions)
    except ClientError:
        exit('Unable to find ami: {0}'.format(ami_id))
def eip_info(aws_config=None, eip=None):
    """Output details for one elastic IP address.

    @type aws_config: Config
    @type eip: unicode
    """
    ec2_client = get_client(client_type='ec2', config=aws_config)
    try:
        addresses = ec2_client.describe_addresses(
            PublicIps=[eip]).get('Addresses')
        address = addresses[0]
        if address:
            output_eip_info(address=address)
    except (ClientError, IndexError):
        # FIX: IndexError added -- an empty Addresses list previously
        # escaped the ClientError-only handler and crashed instead of
        # reporting "not found" (matches sibling functions' handling).
        exit('EIP {0} not found.'.format(eip))
def get_elb(aws_config, elb_name=None):
    """Return the description of a named classic ELB, or exit if missing.

    Returns None when no name is given or the response has no descriptions.

    @type aws_config: Config
    @type elb_name: unicode
    """
    if not elb_name:
        return None
    try:
        elb_client = get_client(client_type='elb', config=aws_config)
        elbs = elb_client.describe_load_balancers(
            LoadBalancerNames=[elb_name])
        if elbs and elbs.get('LoadBalancerDescriptions'):
            return elbs.get('LoadBalancerDescriptions')[0]
    except botocore.exceptions.ClientError:
        exit('ELB: {0} could not be found.'.format(elb_name))
def get_elb(aws_config, elb_name=None):
    """Look up a single classic load balancer by name.

    @type aws_config: Config
    @type elb_name: unicode
    """
    if elb_name:
        try:
            client = get_client(client_type="elb", config=aws_config)
            response = client.describe_load_balancers(
                LoadBalancerNames=[elb_name])
            descriptions = response.get("LoadBalancerDescriptions") \
                if response else None
            if descriptions:
                return descriptions[0]
        except botocore.exceptions.ClientError:
            exit("ELB: {0} could not be found.".format(elb_name))
def secgroup_info(aws_config=None, secgroup_id=None):
    """Output details for one security group.

    @type aws_config: Config
    @type secgroup_id: unicode
    """
    ec2_client = get_client(client_type='ec2', config=aws_config)
    try:
        result = ec2_client.describe_security_groups(GroupIds=[secgroup_id])
        output_secgroup_info(secgroup=result.get('SecurityGroups')[0])
    except (ClientError, IndexError):
        exit("Cannot find security group: {0}.".format(secgroup_id))
def lc_info(aws_config=None, lc_name=None):
    """Display one launch configuration by name.

    @type aws_config: Config
    @type lc_name: unicode
    """
    client = get_client(client_type='autoscaling', config=aws_config)
    result = client.describe_launch_configurations(
        LaunchConfigurationNames=[lc_name])
    matched = result.get('LaunchConfigurations')
    if matched:
        output_lc_info(lc=matched[0])
    else:
        exit("Launch Configuration: {0} not found.".format(lc_name))
def ec2_net(aws_config=None, instance_id=None, intervals=None, period=None,
            start=None, end=None):
    """Output CloudWatch NetworkIn/NetworkOut averages for an instance.

    @type aws_config: Config
    @type instance_id: unicode
    @type intervals: int  # CloudWatch Period in seconds; defaults to 60
    @type period: int     # lookback window in seconds; defaults to 7200
    @type start: datetime
    @type end: datetime
    """
    # BUG FIX: the default was end=datetime.datetime.utcnow(), which is
    # evaluated once at import time, so long-lived processes queried an
    # ever-staler window.  Resolve it at call time instead.
    if end is None:
        end = datetime.datetime.utcnow()
    if not intervals:
        intervals = 60
    if not period:
        period = 7200
    cloudwatch_client = get_client(client_type='cloudwatch', config=aws_config)
    if not start:
        start = end - datetime.timedelta(seconds=period)

    def _metric(metric_name):
        # One CloudWatch query; EndTime now honours the caller-supplied
        # 'end' instead of calling utcnow() again.
        return cloudwatch_client.get_metric_statistics(
            Namespace='AWS/EC2',
            MetricName=metric_name,
            Dimensions=[{'Name': 'InstanceId', 'Value': instance_id}],
            StartTime=start,
            EndTime=end,
            Period=intervals,
            Statistics=['Average'],
            Unit='Bytes'
        )

    net_in_datapoints = _metric('NetworkIn').get('Datapoints')
    net_out_datapoints = _metric('NetworkOut').get('Datapoints')
    if not all((net_in_datapoints, net_out_datapoints)):
        exit("Metrics unavailable.")
    sorted_in = sorted(net_in_datapoints, key=lambda v: v.get('Timestamp'))
    sorted_out = sorted(net_out_datapoints, key=lambda v: v.get('Timestamp'))
    output_ec2_net(in_dates=[dp.get('Timestamp') for dp in sorted_in],
                   in_values=[dp.get('Average') for dp in sorted_in],
                   out_dates=[dp.get('Timestamp') for dp in sorted_out],
                   out_values=[dp.get('Average') for dp in sorted_out],
                   instance_id=instance_id)
    exit(0)
def ec2_vol(aws_config=None, instance_id=None, intervals=None, period=None,
            start=None, end=None):
    """Output CloudWatch read/write byte averages for each EBS volume of an
    instance.

    @type aws_config: Config
    @type instance_id: unicode
    @type intervals: int  # CloudWatch Period in seconds; defaults to 60
    @type period: int     # lookback window in seconds; defaults to 7200
    @type start: datetime
    @type end: datetime
    """
    # BUG FIX: end=datetime.datetime.utcnow() as a default argument is
    # evaluated once at import time; resolve it per call instead.
    if end is None:
        end = datetime.datetime.utcnow()
    ebs_vols = ec2_get_instance_vols(aws_config=aws_config,
                                     instance_id=instance_id)
    if not intervals:
        intervals = 60
    if not period:
        period = 7200
    cloudwatch_client = get_client(client_type='cloudwatch', config=aws_config)
    if not start:
        start = end - datetime.timedelta(seconds=period)
    vol_datapoints = list()
    for ebs_vol in ebs_vols:
        volume_id = ebs_vol['Ebs']['VolumeId']

        def _metric(metric_name):
            # EndTime now honours 'end' rather than re-calling utcnow().
            return cloudwatch_client.get_metric_statistics(
                Namespace='AWS/EBS',
                MetricName=metric_name,
                Dimensions=[{'Name': 'VolumeId', 'Value': volume_id}],
                StartTime=start,
                EndTime=end,
                Period=intervals,
                Statistics=['Average'],
                Unit='Bytes'
            )

        sorted_reads = sorted(_metric('VolumeReadBytes').get('Datapoints'),
                              key=lambda v: v.get('Timestamp'))
        sorted_writes = sorted(_metric('VolumeWriteBytes').get('Datapoints'),
                               key=lambda v: v.get('Timestamp'))
        vol_datapoints.append({
            'device_name': ebs_vol['DeviceName'],
            'read_dates': [dp.get('Timestamp') for dp in sorted_reads],
            'read_values': [dp.get('Average') for dp in sorted_reads],
            'write_dates': [dp.get('Timestamp') for dp in sorted_writes],
            'write_values': [dp.get('Average') for dp in sorted_writes],
        })
    output_ec2_vols(vols_datapoints=vol_datapoints, instance_id=instance_id)
def route53_info(aws_config=None, zone_id=None):
    """Output a hosted zone and its resource record sets.

    @type aws_config: Config
    @type zone_id: unicode
    """
    client = get_client(client_type='route53', config=aws_config)
    try:
        zone = client.get_hosted_zone(Id=zone_id)
        records = client.list_resource_record_sets(HostedZoneId=zone_id)
        if zone['HostedZone']['Id']:
            output_route53_info(zone=zone, record_sets=records)
    except botocore.exceptions.ClientError:
        exit("Cannot request hosted zone: {0}".format(zone_id))
def ec2_info(aws_config=None, instance_id=None):
    """Output details for a single EC2 instance.

    @type aws_config: Config
    @type instance_id: unicode
    """
    ec2_client = get_client(client_type='ec2', config=aws_config)
    instance_filter = [{'Name': 'instance-id', 'Values': [instance_id]}]
    reservations = ec2_client.describe_instances(
        Filters=instance_filter).get('Reservations')
    try:
        instance = reservations[0].get('Instances')[0]
        if instance.get('InstanceId'):
            output_ec2_info(instance=instance)
    except IndexError:
        raise SystemExit("Cannot find instance: {0}".format(instance_id))
def vpc_info(aws_config=None, vpc_id=None):
    """Display one VPC with all of its subnets.

    @type aws_config: Config
    @type vpc_id: unicode
    """
    client = get_client(client_type='ec2', config=aws_config)
    try:
        vpc_result = client.describe_vpcs(VpcIds=[vpc_id])
        subnets = client.describe_subnets(
            Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}])
        output_vpc_info(vpc=vpc_result['Vpcs'][0], subnets=subnets)
    except (ClientError, IndexError):
        exit("Cannot find VPC: {0}".format(vpc_id))
def asg_cpu(aws_config=None, asg_name=None, start=None, period=None,
            intervals=None, output_type=None):
    """Output average CPU utilisation for an auto scaling group.

    @type aws_config: Config
    @type asg_name: unicode
    @type intervals: int  # CloudWatch Period in seconds; defaults to 60
    @type period: int     # lookback window in seconds; defaults to 7200
    @type start: datetime
    @type output_type: unicode  # 'graph' (default) or 'table'
    """
    end = datetime.datetime.utcnow()
    if not output_type or output_type == 'graph':
        if not intervals:
            intervals = 60
        if not period:
            period = 7200
        cloudwatch_client = get_client(client_type='cloudwatch',
                                       config=aws_config)
        if not start:
            start = end - datetime.timedelta(seconds=period)
        out = cloudwatch_client.get_metric_statistics(
            Namespace='AWS/EC2',
            MetricName='CPUUtilization',
            Dimensions=[{'Name': 'AutoScalingGroupName', 'Value': asg_name}],
            StartTime=start,
            # FIX: use the 'end' computed above; calling utcnow() again made
            # the window end later than the time 'start' was derived from.
            EndTime=end,
            Period=intervals,
            Statistics=['Average'],
            Unit='Percent'
        )
        sorted_datapoints = sorted(out.get('Datapoints'),
                                   key=lambda v: v.get('Timestamp'))
        dates = [dp.get('Timestamp') for dp in sorted_datapoints]
        values = [dp.get('Average') for dp in sorted_datapoints]
        output_asg_cpu(dates=dates, values=values, asg_name=asg_name)
        exit(0)
    elif output_type == 'table':
        # NOTE(review): table output looks unimplemented -- only prints a
        # placeholder; confirm intended behaviour.
        print("table")
        exit(0)
def ami_list(aws_config=None, filter_term=None):
    """List AMIs owned by this account, optionally filtered by name.

    @type aws_config: Config
    @type filter_term: unicode
    """
    all_images = list()
    ec2_client = get_client(client_type='ec2', config=aws_config)
    for image in ec2_client.describe_images(Owners=['self']).get('Images'):
        # FIX: images may lack a Name; 'x in None' would raise TypeError.
        if filter_term and filter_term not in (image.get('Name') or ''):
            continue
        all_images.append(image)
    if all_images:
        output_ami_list(amis=all_images)
    elif filter_term:
        # FIX: corrected typo 'mathching' -> 'matching' in the message.
        exit('No matching amis found.')
    else:
        exit('No amis found.')
def ec2_list(aws_config=None, filter_term=None):
    """List EC2 instances, optionally filtered by Name-tag substring.

    @type aws_config: Config
    @type filter_term: unicode
    """
    ec2_client = get_client(client_type='ec2', config=aws_config)
    reservations = ec2_client.describe_instances().get('Reservations')
    all_instances = list()
    for reservation in reservations:
        for instance in reservation.get('Instances'):
            # NOTE(review): instances without tags bypass the filter and are
            # always listed -- behaviour preserved; confirm it is intended.
            if (instance.get('Tags') and filter_term and
                    filter_term not in get_tag_value(
                        name='Name', tags=instance.get('Tags'))):
                continue
            all_instances.append(instance)
    if all_instances:
        output_ec2_list(instances=all_instances)
    else:
        # BUG FIX: exit() previously ran unconditionally, printing the
        # 'not found' message even after a successful listing.
        exit('No ec2 instances found.')
def delete_unnamed_volumes(aws_config=None, noop=False):
    """Delete EBS volumes that have no Name tag and no attachments.

    @type aws_config: Config
    @type noop: bool  # when True, only report what would be deleted
    """
    ec2_client = get_client(client_type='ec2', config=aws_config)
    volumes = ec2_client.describe_volumes().get('Volumes')
    total_volumes = len(volumes)
    volumes_to_delete = list()
    for volume in volumes:
        volume_has_name = any(
            tag.get('Key') == 'Name' for tag in volume.get('Tags') or [])
        if not volume_has_name and not volume.get('Attachments'):
            volumes_to_delete.append(volume)
    total_volumes_to_delete = len(volumes_to_delete)
    total_volumes_deleted = 0
    if not noop:
        try:
            for index, volume_to_delete in enumerate(volumes_to_delete,
                                                     start=1):
                ec2_client.delete_volume(
                    VolumeId=volume_to_delete.get('VolumeId'))
                total_volumes_deleted = index
        except ClientError:
            # FIX: narrowed bare except; corrected typo 'whislt' -> 'whilst'.
            print('An error occurred whilst deleting volumes.')
        if total_volumes_to_delete:
            exit('Deleted {0} volumes out of a total of {1} volumes '
                 'that are unnamed and unattached.'.format(
                     total_volumes_deleted, total_volumes_to_delete))
        else:
            exit('No unnamed and unattached volumes were found.')
    elif total_volumes_to_delete:
        exit('There are {0} volumes out of a total of {1} volumes '
             'that are unnamed and unattached.'.format(
                 total_volumes_to_delete, total_volumes))
    else:
        exit('No unnamed and unattached volumes were found.')
def asg_delete(aws_config=None, asg_name=None):
    """Scale an auto scaling group down to zero and delete it.

    @type aws_config: Config
    @type asg_name: unicode
    """
    asg_client = get_client(client_type='autoscaling', config=aws_config)
    response = asg_client.describe_auto_scaling_groups(
        AutoScalingGroupNames=[asg_name])
    groups = response.get('AutoScalingGroups')
    if not groups:
        exit("Auto Scaling Group: {0} not found.".format(asg_name))
    asg_instance_name = groups[0].get('AutoScalingGroupName')
    # Shrink to zero instances first so a non-forced delete can succeed.
    asg_client.update_auto_scaling_group(
        AutoScalingGroupName=asg_instance_name,
        MinSize=0,
        MaxSize=0,
    )
    asg_client.delete_auto_scaling_group(
        AutoScalingGroupName=asg_instance_name,
        ForceDelete=False
    )
    exit("Auto Scaling Group {0} is being deleted.".format(asg_instance_name))
def s3_list(aws_config=None, item=None):
    """List all buckets, or the contents of a bucket/prefix.

    @type aws_config: Config
    @type item: unicode  # 'bucket' or 'bucket/prefix/'
    """
    s3_client = get_client(client_type='s3', config=aws_config)
    buckets = s3_client.list_buckets()
    if not item:
        if buckets.get('Buckets'):
            output_s3_list(buckets=buckets.get('Buckets'))
        else:
            exit("No buckets found.")
        return
    prefix = ''
    if item and '/' in item:
        bucket_name, _, prefix = item.partition('/')
    else:
        bucket_name = item
    check_bucket_accessible(s3_client=s3_client, bucket_name=bucket_name)
    try:
        objects = s3_client.list_objects(Bucket=bucket_name, Prefix=prefix,
                                         Delimiter='/')
        if not any((objects.get('CommonPrefixes'), objects.get('Contents'))):
            exit('Nothing found in: {0}'.format(item[:-1]))
        # CommonPrefixes act as the "folders" at this delimiter level.
        folders = list(objects.get('CommonPrefixes', list()))
        output_s3_list(objects=objects, folders=folders, item=item,
                       bucket_name=bucket_name)
    except ClientError as ce:
        error_code = ce.response['Error']['Code']
        if 'NoSuchBucket' in error_code:
            exit('Bucket not found.')
        else:
            exit('Unhandled error: {0}'.format(error_code))
def s3_cp(aws_config=None, source=None, dest=None):
    """Copy a file between the local filesystem and S3 (either direction).

    Direction is inferred from which of source/dest carries the 's3://'
    prefix.  S3-to-S3 copies are not yet implemented.

    @type aws_config: Config
    @type source: unicode
    @type dest: unicode
    """
    from acli.utils import (is_readable)
    from boto3.s3.transfer import S3Transfer, TransferConfig
    import os
    # Multipart kicks in above 200 MB; up to 10 concurrent transfer threads.
    config = TransferConfig(
        multipart_threshold=200 * 1024 * 1024,
        max_concurrency=10,
        num_download_attempts=10,
    )
    s3_prefix = 's3://'
    s3_client = get_client(client_type='s3', config=aws_config)
    if source.startswith(s3_prefix) and not dest.startswith(s3_prefix):
        # COPYING FROM S3 TO LOCAL
        # source[5:] strips the 's3://' prefix; first element is the bucket.
        s3_location = source[5:].split('/')
        bucket_name = s3_location[0]
        s3_source = '/'.join(s3_location[1:])
        check_bucket_accessible(s3_client=s3_client, bucket_name=bucket_name)
        # Rewrite directory-style destinations to a full file path that
        # keeps the source's basename.
        if dest == '/':
            dest = '{0}{1}'.format(os.path.abspath(dest), s3_location[-1])
        elif dest == '.' or dest.endswith('/'):
            dest = '{0}/{1}'.format(os.path.abspath(dest), s3_location[-1])
        elif os.path.isdir(os.path.abspath(dest)):
            dest = '{0}/{1}'.format(dest, s3_location[-1])
        s3_file_md5 = get_s3_file_md5(s3_client=s3_client,
                                      bucket_name=bucket_name,
                                      path=s3_source)
        if s3_file_md5:
            # Skip the download when the local copy already matches.
            if s3_file_md5 == get_local_file_md5(dest):
                exit('Not transferring as identical file already exists.')
        else:
            exit('Cannot find: {0}/{1}'.format(bucket_name, s3_source))
        transfer = S3Transfer(s3_client, config)
        try:
            print('Transferring: {0} to: {1}'.format(source, dest))
            transfer.download_file(bucket_name, s3_source, dest)
        except BaseException as e:
            # Broad catch: surface permission errors nicely, report the rest.
            if hasattr(e, 'strerror') and e.strerror == 'Permission denied':
                exit('Permission denied.')
            else:
                print('Unhandled exception: {0}'.format(e))
    elif source.startswith(s3_prefix) and dest.startswith(s3_prefix):
        # COPYING FROM S3 TO S3
        print('Transferring: {0} to: {1}'.format(source, dest))
        exit('Not yet implemented.')
    elif not source.startswith(s3_prefix) and dest.startswith(s3_prefix):
        try:
            # COPYING ITEM(S) FROM LOCAL TO S3
            if os.path.isdir(source):
                exit('File transfers only for now.')
            else:
                # COPY LOCAL FILE TO S3
                if not is_readable(source):
                    exit('Cannot access: {0}'.format(source))
                s3_location = dest[5:].split('/')
                bucket_name = s3_location[0]
                s3_dest = '/'.join(s3_location[1:])
                # COPYING FILE TO A FOLDER
                if dest.endswith('/'):
                    file_name = source.split('/')[-1]
                    s3_dest += file_name
                check_bucket_accessible(s3_client=s3_client,
                                        bucket_name=bucket_name)
                # CHECK IF FILES ARE IDENTICAL
                s3_file_md5 = get_s3_file_md5(s3_client=s3_client,
                                              bucket_name=bucket_name,
                                              path=s3_dest)
                # If it's mulipart, then don't bother checking and just transfer
                # (multipart ETags contain '-' and are not plain MD5 sums).
                if s3_file_md5 and '-' not in s3_file_md5:
                    local_file_md5 = get_local_file_md5(path=source)
                    if local_file_md5 == s3_file_md5:
                        exit('Not transferring as identical file already exists.')
                print('Transferring: {0} to: {1}'.format(source, dest))
                transfer = S3Transfer(s3_client, config)
                transfer.upload_file(source, bucket_name, s3_dest)
        except ClientError as ce:
            if 'AccessDenied' in ce.response['Error']['Code']:
                exit('Access denied. Please check permissions.')
        except Exception as e:
            print('Unhandled exception: {0}'.format(e))
    else:
        exit('Source or dest must be an S3 location defined with s3://.')
    exit()