def main(argv):
    """Entry point: parse options, connect to EC2, and purge old EBS volumes."""
    options = parse_args()
    if options.debug:
        logger.setLevel(logging.DEBUG)
        logger.debug('Log level DEBUG')

    # Prefer a named profile when set; otherwise fall back to an explicit
    # access-key/secret pair from the environment.
    if 'AWS_PROFILE' in os.environ:
        conn = ec2.connect_to_region(
            options.region, profile_name=os.environ['AWS_PROFILE'])
    elif 'AWS_ACCESS_KEY_ID' in os.environ and 'AWS_SECRET_ACCESS_KEY' in os.environ:
        conn = ec2.connect_to_region(
            options.region,
            aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
            aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'])
    else:
        logger.error("Missing AWS credentials. Check your environment.")
        sys.exit(1)

    if options.dry_run:
        logger.info("Dry run mode, just listing")
    purge_ebs_volumes(conn, options.days, options.regex, options.dry_run)
    if options.dry_run:
        logger.info("Dry run mode, no volume has been removed")
def get_instances_by_region(self, region):
    '''Make an AWS EC2 API call to list the instances in a particular region.'''
    try:
        if self.eucalyptus:
            conn = boto.connect_euca(host=self.eucalyptus_host)
            conn.APIVersion = '2010-08-31'
        elif self.creds == {}:
            # No explicit credentials: rely on boto's default chain.
            conn = ec2.connect_to_region(region)
        else:
            conn = ec2.connect_to_region(
                region,
                aws_access_key_id=self.creds['key'],
                aws_secret_access_key=self.creds['secret'])
        # connect_to_region fails "silently" by returning None when the
        # region name is wrong or unsupported.
        if conn is None:
            print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
            sys.exit(1)
        for reservation in conn.get_all_instances():
            for instance in reservation.instances:
                self.add_instance(instance, region)
    # BUG FIX: "except X, e" and the print statements were Python-2-only
    # syntax; "as e" / print() work on py2.6+ and py3.
    except boto.exception.BotoServerError as e:
        if not self.eucalyptus:
            print("Looks like AWS is down again:")
        print(e)
        sys.exit(1)
def getAllInstances(self):
    """Return a list of dicts describing every EC2 instance in every region.

    Probes credential validity by connecting to ap-southeast-1 and uses
    that connection to enumerate all available regions.
    """
    try:
        # Probe the key's validity (and whether it is a China vs. global
        # account) and fetch the region list.
        conn = ec2.connect_to_region('ap-southeast-1')
        regions = conn.get_all_regions()
        # IAM call doubles as a credentials check; account id is unused below.
        accountid = boto.connect_iam().get_user().arn.split(':')[4]
    except EC2ResponseError as e:
        # BUG FIX: the old code printed and fell through, then crashed with
        # NameError on the undefined 'regions'; bail out cleanly instead.
        print(e)
        return []
    instances = []
    for region in regions:
        conn = ec2.connect_to_region(region.name)
        for instance in conn.get_only_instances():
            instances.append({
                'instance_id': instance.id,
                'name': instance.tags.get('Name', ''),
                'project': instance.tags.get('PROJECT', ''),
                'instance_type': instance.instance_type,
                'state': instance.state,
                'placement': instance.placement,
                'private_ip': instance.private_ip_address,
                'public_ip': instance.ip_address,
                'vpc_id': instance.vpc_id,
                'subnet_id': instance.subnet_id,
                'image_id': instance.image_id,
                'virtualization_type': instance.virtualization_type,
                'launch_time': instance.launch_time,
            })
    return instances
def getAllVolumes(self):
    """Return a list of dicts describing every EBS volume in every region.

    Probes credential validity by connecting to ap-southeast-1 and uses
    that connection to enumerate all available regions.
    """
    try:
        # Probe the key's validity (and whether it is a China vs. global
        # account) and fetch the region list.
        conn = ec2.connect_to_region('ap-southeast-1')
        regions = conn.get_all_regions()
        # IAM call doubles as a credentials check; account id is unused below.
        accountid = boto.connect_iam().get_user().arn.split(':')[4]
    except EC2ResponseError as e:
        # BUG FIX: the old code printed and fell through, then crashed with
        # NameError on the undefined 'regions'; bail out cleanly instead.
        print(e)
        return []
    volumes = []
    for region in regions:
        conn = ec2.connect_to_region(region.name)
        for volume in conn.get_all_volumes():
            attachment = volume.attach_data
            volumes.append({
                'volume_id': volume.id,
                'type': volume.type,
                'size': volume.size,
                'status': volume.status,
                'iops': volume.iops,
                'zone': volume.zone,
                'project': volume.tags.get('PROJECT', ''),
                'instance_id': attachment.instance_id,
                'snapshot_id': volume.snapshot_id,
                'create_time': volume.create_time,
            })
    return volumes
def connect_to_ec2(region='us-east-1', access_key=None, secret_key=None):
    """ Connect to AWS ec2

    :type region: str
    :param region: AWS region to connect to
    :type access_key: str
    :param access_key: AWS access key id
    :type secret_key: str
    :param secret_key: AWS secret access key
    :returns: boto.ec2.connection.EC2Connection -- EC2 connection
    """
    logger.info('Connecting to AWS EC2 in {}'.format(region))
    # Pass explicit credentials only when an access key was supplied;
    # otherwise let boto use its default credential chain.
    credentials = {}
    if access_key:
        credentials = {
            'aws_access_key_id': access_key,
            'aws_secret_access_key': secret_key,
        }
    connection = ec2.connect_to_region(region, **credentials)
    if not connection:
        logger.error('An error occurred when connecting to EC2')
        sys.exit(1)
    return connection
def main():
    """Verify DNS (A/PTR/CNAME) records for all EC2 instances in a region."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-k", "--secrets", type=argparse.FileType('r'),
                        help="optional file where secrets can be found")
    parser.add_argument("-r", "--region", dest="region", required=True,
                        help="optional list of regions")
    # BUG FIX: the help text said "Supress logging messages", but -v
    # actually *raises* the log level to DEBUG.
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Increase logging verbosity to DEBUG")
    args = parser.parse_args()
    secrets = json.load(args.secrets) if args.secrets else None

    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s")
    log.setLevel(logging.DEBUG if args.verbose else logging.WARNING)

    if secrets:
        conn = connect_to_region(
            args.region,
            aws_access_key_id=secrets['aws_access_key_id'],
            aws_secret_access_key=secrets['aws_secret_access_key']
        )
    else:
        conn = connect_to_region(args.region)

    pool = Pool()
    res = conn.get_all_instances()
    # Flatten reservations into one instance list (clearer than reduce(),
    # and works unchanged on Python 3 where reduce is not a builtin).
    instances = [i for r in res for i in r.instances]
    a_checks = []
    ptr_checks = []
    cname_checks = []
    for i in instances:
        # TODO: ignore EB
        name = i.tags.get("Name")
        if not name:
            log.warning("%s has no Name tag, skipping...", i)
            continue
        fqdn = i.tags.get("FQDN")
        if not fqdn:
            log.warning("%s has no FQDN tag, skipping...", i)
            continue
        ip = i.private_ip_address
        if not ip:
            log.warning("%s no ip assigned, skipping...", i)
            continue
        cname = "%s.build.mozilla.org" % name
        a_checks.append([fqdn, ip])
        ptr_checks.append([fqdn, ip])
        cname_checks.append([fqdn, cname])
    pool.map(check_A, a_checks)
    pool.map(check_PTR, ptr_checks)
    pool.map(check_CNAME, cname_checks)
    pool.close()
    pool.join()
def connect(self):
    """Open an EC2 connection to eu-west-1 and print current instances."""
    print(_green("Connecting..."))
    # BUG FIX: removed a stray ec2.connect_to_region("eu-west-1a") call.
    # "eu-west-1a" is an availability zone, not a region, so that call
    # returned None, and its result was discarded anyway.
    self.ec2conn = ec2.connect_to_region(
        'eu-west-1',
        aws_access_key_id=self.access_key,
        aws_secret_access_key=self.secret_key)
    print(self.get_instances())
def import_s3key_to_ami(p):
    """Import a VMDK stored in S3 into an AMI, copy it to several regions,
    then deregister the temporary import AMI.

    p must provide: region (import region), s3bucket and s3key (location of
    the disk image; s3key is reused as the AMI name/description).
    """
    disk_container = {
        'Description': p.s3key,
        'DiskContainers': [{
            'Description': p.s3key,
            'UserBucket': {'S3Bucket': p.s3bucket, 'S3Key': p.s3key},
        }],
    }
    aws_import_command = [
        'aws', '--region', p.region, 'ec2', 'import-image',
        '--cli-input-json', json.dumps(disk_container)]
    logging.info("Running: {}".format(' '.join(aws_import_command)))
    try:
        importcmd_resp = subprocess.check_output(aws_import_command)
    except subprocess.CalledProcessError:
        # BUG FIX: the old handler logged a mangled message (two string
        # literals with no concatenation operator) and then fell through to
        # use the undefined 'importcmd_resp'; log properly and re-raise.
        logging.error("An error occurred while executing %s",
                      ' '.join(aws_import_command))
        raise
    logging.debug(json.loads(importcmd_resp))
    import_task_id = json.loads(importcmd_resp)['ImportTaskId']
    logging.info("AWS is now importing vmdk to AMI.")
    # Poll until the import task reports an ImageId and no Progress field.
    while True:
        status_cmd = [
            'aws', '--region', p.region, 'ec2', 'describe-import-image-tasks',
            '--import-task-ids', import_task_id]
        task = json.loads(subprocess.check_output(status_cmd))['ImportImageTasks'][0]
        if 'Progress' not in task and 'ImageId' in task:
            temporary_ami = task['ImageId']
            logging.info("Done, amiid is {}".format(temporary_ami))
            break
        sys.stdout.write("\r%s%%" % task['Progress'])
        sys.stdout.flush()
        time.sleep(5)
    logging.info("Successfully created temporary AMI {}".format(temporary_ami))
    # import-image assigns a random name/description that cannot be changed;
    # create properly-named copies in every target region instead.
    amis_created = {}
    for region in ['eu-central-1', 'us-west-2', 'us-east-1']:
        ec2conn = ec2.connect_to_region(region)
        amis_created[region] = ec2conn.copy_image(
            p.region, temporary_ami, name=p.s3key, description=p.s3key)
        print("Created {} in region {}".format(amis_created[region].image_id, region))
    logging.info("Deregistering temporary AMI {}".format(temporary_ami))
    ec2conn = ec2.connect_to_region(p.region)
    ec2conn.deregister_image(temporary_ami)
def get_connection(region, secrets):
    """Return an EC2 connection for *region*.

    Uses explicit credentials from the *secrets* mapping when provided,
    otherwise boto's default credential chain.
    """
    if not secrets:
        return connect_to_region(region)
    return connect_to_region(
        region,
        aws_access_key_id=secrets['aws_access_key_id'],
        aws_secret_access_key=secrets['aws_secret_access_key']
    )
def __init__(self, region, profile, instance_tags_to_propagate,
             volume_tags_to_propagate, volume_tags_to_be_set,
             snapshot_tags_to_be_set, dryrun, append, volumes_to_tag,
             snapshots_to_tag, instance_filter, novolumes, nosnapshots):
    """Configure the Graffiti Monkey tagger and open an EC2 connection.

    Connects with the given profile; if that profile does not exist,
    falls back to boto's default credentials. Raises
    GraffitiMonkeyException when no AWS credentials can be found at all.
    """
    # This list of tags associated with an EC2 instance to propagate to
    # attached EBS volumes
    self._instance_tags_to_propagate = instance_tags_to_propagate
    # This is a list of tags associated with a volume to propagate to
    # a snapshot created from the volume
    self._volume_tags_to_propagate = volume_tags_to_propagate
    # This is a dict of tags (keys and values) which will be set on the volumes (ebs)
    self._volume_tags_to_be_set = volume_tags_to_be_set
    # This is a dict of tags (keys and values) which will be set on the snapshots
    self._snapshot_tags_to_be_set = snapshot_tags_to_be_set
    # The region to operate in
    self._region = region
    # The profile to use
    self._profile = profile
    # Whether this is a dryrun
    self._dryrun = dryrun
    # If we are appending tags
    self._append = append
    # Volumes we will tag
    self._volumes_to_tag = volumes_to_tag
    # Snapshots we will tag
    self._snapshots_to_tag = snapshots_to_tag
    # Filter instances by a given param and propagate their tags to their attached volumes
    self._instance_filter = instance_filter
    # If we process volumes
    self._novolumes = novolumes
    # If we process snapshots
    self._nosnapshots = nosnapshots
    log.info("Starting Graffiti Monkey")
    log.info("Options: dryrun %s, append %s, novolumes %s, nosnapshots %s",
             self._dryrun, self._append, self._novolumes, self._nosnapshots)
    log.info("Connecting to region %s using profile %s",
             self._region, self._profile)
    try:
        self._conn = ec2.connect_to_region(self._region, profile_name=self._profile)
    except boto.exception.NoAuthHandlerFound:
        raise GraffitiMonkeyException('No AWS credentials found - check your credentials')
    except boto.provider.ProfileNotFoundError:
        # The named profile does not exist; retry with default credentials.
        log.info("Connecting to region %s using default credentials", self._region)
        try:
            self._conn = ec2.connect_to_region(self._region)
        except boto.exception.NoAuthHandlerFound:
            raise GraffitiMonkeyException('No AWS credentials found - check your credentials')
def __init__(self, region, profile, instance_tags_to_propagate,
             volume_tags_to_propagate, dryrun, append, volumes_to_tag,
             snapshots_to_tag, novolumes, nosnapshots,
             network_interfaces_to_tag, no_network_interfaces,
             use_instance_tags):
    """Configure the Graffiti Monkey tagger and open an EC2 connection.

    Connects with the given profile; if that profile does not exist,
    falls back to boto's default credentials. Raises
    GraffitiMonkeyException when no AWS credentials can be found at all.
    """
    # This list of tags associated with an EC2 instance to propagate to
    # attached EBS volumes
    self._instance_tags_to_propagate = instance_tags_to_propagate
    # This is a list of tags associated with a volume to propagate to
    # a snapshot created from the volume
    self._volume_tags_to_propagate = volume_tags_to_propagate
    # The region to operate in
    self._region = region
    # The profile to use
    self._profile = profile
    # Whether this is a dryrun
    self._dryrun = dryrun
    # If we are appending tags
    self._append = append
    # Volumes we will tag
    self._volumes_to_tag = volumes_to_tag
    # Snapshots we will tag
    self._snapshots_to_tag = snapshots_to_tag
    # Network interfaces we will tag
    self._network_interfaces_to_tag = network_interfaces_to_tag
    # If we process volumes
    self._novolumes = novolumes
    # If we process snapshots
    self._nosnapshots = nosnapshots
    # If we process network interfaces
    self._no_network_interfaces = no_network_interfaces
    # Whether tags should be sourced from the instance
    self._use_instance_tags = use_instance_tags
    log.info("Starting Graffiti Monkey")
    log.info("Options: dryrun %s, append %s, novolumes %s, nosnapshots %s, use_instance_tags %s",
             self._dryrun, self._append, self._novolumes, self._nosnapshots,
             self._use_instance_tags)
    log.info("Connecting to region %s using profile %s",
             self._region, self._profile)
    try:
        self._conn = ec2.connect_to_region(self._region, profile_name=self._profile)
    except boto.exception.NoAuthHandlerFound:
        raise GraffitiMonkeyException('No AWS credentials found - check your credentials')
    except boto.provider.ProfileNotFoundError:
        # The named profile does not exist; retry with default credentials.
        log.info("Connecting to region %s using default credentials", self._region)
        try:
            self._conn = ec2.connect_to_region(self._region)
        except boto.exception.NoAuthHandlerFound:
            raise GraffitiMonkeyException('No AWS credentials found - check your credentials')
def get_conn_env():
    """Connect to eu-west-1 (optionally via a profile named on argv) and
    return all reservations.

    argv[1], when present, must be one of demo/prod/test and names the
    boto profile to use; otherwise the default credentials are used.
    """
    env_list = ["demo", "prod", "test"]
    if len(sys.argv) > 1:
        env = sys.argv[1]
        if env in env_list:  # clearer than env_list.count(...) > 0
            ec2_conn = ec2.connect_to_region("eu-west-1", profile_name=env)
        else:
            # BUG FIX: py2 print statement, and bare exit() (a REPL helper
            # with status 0) replaced with sys.exit(1) on the error path.
            print("You need to input the right $ENV.")
            sys.exit(1)
    else:
        ec2_conn = ec2.connect_to_region("eu-west-1")
    return ec2_conn.get_all_reservations()
def provision():
    """Provision a cluster via the configured execution context, create
    local DFS directories when needed, and tag the launched instances."""
    if exec_ctx == 'spark_ec2':
        eggo.spark_ec2.provision()
    elif exec_ctx == 'director':
        eggo.director.provision()
    # at this point, get_master() should be valid

    # If the DFS lives on the local filesystem, the directories may need
    # to be created first.
    root_url = urlparse(eggo_config.get('dfs', 'dfs_root_url'))
    if root_url.scheme == 'file':
        for option in ('dfs_root_url', 'dfs_raw_data_url', 'dfs_tmp_data_url'):
            dfs_path = urlparse(eggo_config.get('dfs', option)).path
            local('mkdir -p {0}'.format(dfs_path))

    # Tag every instance provisioned under our key pair with its owner
    # and stack name.
    if exec_ctx in ('spark_ec2', 'director'):
        conn = connect_to_region(eggo_config.get(exec_ctx, 'region'))
        key_pair = eggo_config.get('aws', 'ec2_key_pair')
        for instance in conn.get_only_instances(filters={'key-name': [key_pair]}):
            instance.add_tag('owner', getuser())
            instance.add_tag('stack_name', eggo_config.get(exec_ctx, 'stack_name'))
def get_instances_by_region(self, region):
    """Make an AWS EC2 API call to list the instances in a particular region."""
    try:
        if self.eucalyptus:
            conn = boto.connect_euca(host=self.eucalyptus_host)
            conn.APIVersion = "2010-08-31"
        else:
            conn = ec2.connect_to_region(region)
        # connect_to_region fails "silently" by returning None when the
        # region name is wrong or unsupported.
        if conn is None:
            print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
            sys.exit(1)
        reservations = []
        if self.ec2_instance_filters:
            # BUG FIX: iteritems() is py2-only; items() works everywhere.
            for filter_key, filter_values in self.ec2_instance_filters.items():
                reservations.extend(
                    conn.get_all_instances(filters={filter_key: filter_values}))
        else:
            reservations = conn.get_all_instances()
        for reservation in reservations:
            for instance in reservation.instances:
                self.add_instance(instance, region)
    # BUG FIX: "except X, e" and the print statements were py2-only syntax.
    except boto.exception.BotoServerError as e:
        if not self.eucalyptus:
            print("Looks like AWS is down again:")
        print(e)
        sys.exit(1)
def __init__(self):
    """
    Initialize the EC2 connection.

    Reads the target region from the OSP_REGION environment variable and
    raises KeyError when it is not set. NOTE(review): connect_to_region
    returns None for unknown region names — confirm callers handle that.
    """
    self.conn = ec2.connect_to_region(os.environ['OSP_REGION'])
def get_all_reserved_instances():
    """Write a dated report of all active reserved instances in us-east-1
    to <report_home>/all_reserved_instances_YYYYMMDD.txt."""
    from boto.ec2 import connect_to_region
    import time

    envn = 'AWSProfileAccount'  # .aws profile used for credentials
    region = 'us-east-1'
    ec2conn = connect_to_region(region, profile_name=envn)
    total_instance_types = 0
    total_reserved_instances = 0
    # BUG FIX: the report file was never closed; use a context manager.
    with open(report_home + '/all_reserved_instances_'
              + time.strftime("%Y%m%d" + ".txt"), 'w') as ri_report:
        try:
            reservations = ec2conn.get_all_reserved_instances()
            mydict = {}
            for res in reservations:
                # Merged the two duplicate "active" checks into one branch.
                if res.state == "active":
                    mydict[res.id] = {'type': res.instance_type,
                                      'count': res.instance_count,
                                      'offer': res.offering_type,
                                      'platform': res.description}
                    total_reserved_instances += 1
                    total_instance_types += res.instance_count
            # BUG FIX: itervalues() is py2-only; values() works everywhere.
            ordered_dict = sorted(mydict.values(), key=lambda x: x['type'])
            ri_report.write(tabulate.tabulate(ordered_dict, headers="keys"))
            ri_report.write("\n\n" + "Total Reserved Instance Types: "
                            + str(total_reserved_instances) + "\n")
            ri_report.write("Total Reserved Instances: "
                            + str(total_instance_types) + "\n")
        # BUG FIX: "except Exception, e" is py2-only syntax.
        except Exception as e:
            print(e)
def aws_provisioner(
    access_key, secret_access_token, keyname, region, zone, security_groups,
    instance_type=b"m3.large", session_token=None,
):
    """
    Build an IProvisioner that launches nodes on AWS EC2.

    :param bytes access_key: The access_key to connect to AWS with.
    :param bytes secret_access_token: The corresponding secret token.
    :param bytes region: The AWS region in which to launch the instance.
    :param bytes zone: The AWS zone in which to launch the instance.
    :param bytes keyname: The name of an existing ssh public key configured
        in AWS. The provision step assumes the corresponding private key is
        available from an agent.
    :param list security_groups: List of security groups to put created
        nodes in.
    :param bytes instance_type: AWS instance type for cluster nodes.
    :param bytes session_token: The optional session token, if required
        for connection.
    """
    conn = connect_to_region(
        region,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_access_token,
        security_token=session_token,
    )
    # connect_to_region signals an unknown region by returning None.
    if conn is None:
        raise ValueError("Invalid region: {}".format(region))
    provisioner = AWSProvisioner(
        _connection=conn,
        _keyname=keyname,
        _security_groups=security_groups,
        _zone=zone,
        _default_size=instance_type,
    )
    return provisioner
def terminate():
    """Terminate the EC2 instance named on the command line.

    Usage: python terminate_instance.py <credentials-file> <instance-id>
    where the credentials file holds whitespace-separated key/value lines
    defining region, key_id and access_key.
    """
    if len(sys.argv) < 3:
        # BUG FIX: py2-only print statements converted to print().
        print("Invalid Usage: try \"python terminate_instance.py <credentials> <instance_id_to_terminate> \" ")
        return None
    instance_id = sys.argv[2]
    creds = {}
    # BUG FIX: the credentials file was never closed; use a context manager.
    with open(sys.argv[1]) as f:
        for line in f:
            (k, v) = line.split()
            creds[k] = v
    conn = ec2.connect_to_region(
        creds['region'],
        aws_access_key_id=creds['key_id'],
        aws_secret_access_key=creds['access_key'])
    terminated = conn.terminate_instances(instance_ids=[instance_id])
    print(terminated)
    for instance in terminated:
        if instance_id == str(instance.id):
            print("Instance " + instance_id + " successfully terminated")
            break
def get_by_nametag(cls, instancename_with_optional_region):
    """
    Look up the single EC2 instance carrying the given Name-tag.

    :param instancename_with_optional_region:
        Parsed with :func:`parse_instancename` to find the region and name.
    :raise Ec2RegionConnectionError: If connecting to the region fails.
    :raise InstanceLookupError:
        Or one of its subclasses if the requested instance was not found
        in the region.
    :return: A :class:`Ec2InstanceWrapper` contaning the requested instance.
    """
    region, name = parse_instancename(instancename_with_optional_region)
    connection = connect_to_region(region_name=region, **awsfab_settings.AUTH)
    if not connection:
        raise Ec2RegionConnectionError(region)

    matches = connection.get_all_instances(filters={'tag:Name': name})
    # Exactly one reservation with exactly one instance must match.
    if not matches:
        raise NoInstanceWithNameFound('No ec2 instances with tag:Name={0}'.format(name))
    if len(matches) > 1:
        raise MultipleInstancesWithSameNameError('More than one ec2 reservations with tag:Name={0}'.format(name))
    instances = matches[0].instances
    if len(instances) != 1:
        raise NotExactlyOneInstanceError('Did not get exactly one instance with tag:Name={0}'.format(name))
    return cls(instances[0])
def _wait_pending_instances(self, machine_options, instances):
    """Block until every instance in *instances* reaches the 'running' state.

    Parameters:
        machine_options: object whose .region names the AWS region of the
            instances (BUG FIX: the old docstring documented a bare
            'region' parameter that does not exist).
        instances: (list of boto.ec2.instance) instances to wait for.
    Return:
        (list of boto.ec2.instance) Instances with up-to-date status.
    """
    conn = ec2.connect_to_region(machine_options.region,
                                 aws_access_key_id=self._access_key_id,
                                 aws_secret_access_key=self._secret_access_key)
    ids = [i.id for i in instances]
    first_time = True
    while True:
        all_instances = []
        for r in conn.get_all_instances(ids):
            all_instances.extend(r.instances)
        if all(i.state == 'running' for i in all_instances):
            break
        if first_time:
            # BUG FIX: py2-only "print x," replaced with an explicit write
            # so the later 'done' still lands on the same output line.
            sys.stdout.write("Waiting for %d instance(s) to start... " % len(ids))
            sys.stdout.flush()
            first_time = False
        time.sleep(1)
    if not first_time:
        print('done')
    return all_instances
def _get_instances(self, machine_options):
    """Get all instances used for cluster test.

    Parameters:
        machine_options: object providing .region, .machine_type and .ami,
            used to locate and filter the test instances.
    Return:
        (list of boto.ec2.instance) pending or running instances matching
        this test's tags.
    """
    conn = ec2.connect_to_region(machine_options.region,
                                 aws_access_key_id=self._access_key_id,
                                 aws_secret_access_key=self._secret_access_key)
    # Only instances tagged for this cluster-test framework, this test
    # name and machine type, launched from the expected AMI.
    filters = {'tag:' + CLUSTER_TEST_KEY: CLUSTER_TEST_VALUE,
               'tag:' + TEST_NAME_KEY: self.test_name,
               'tag:' + MACHINE_TYPE_KEY: machine_options.machine_type,
               'image_id': machine_options.ami}
    instances = []
    for reservations in conn.get_all_instances(filters=filters):
        for i in reservations.instances:
            # Skip instances that are stopping/terminated.
            if i.state == 'pending' or i.state == 'running':
                instances.append(i)
    return instances
def discover_sg_rules(self, sg):
    """Return (egress, ingress) rule dicts for security group *sg* in
    us-east-1, each keyed by a running integer index."""
    region = 'us-east-1'
    try:
        AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY = self.find_aws_creds()
    # BUG FIX: the bare except logged and fell through, then crashed with
    # NameError on the undefined credentials; narrow it and re-raise.
    except Exception:
        logger.info("Error: {0}: Cannot retrieve boto credentials".format('self.discover_sg_rules'))
        raise
    conn_ec2 = connect_to_region(region,
                                 aws_access_key_id=AWS_ACCESS_KEY_ID,
                                 aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
    # Fetch the group once instead of issuing two identical API calls.
    group = conn_ec2.get_all_security_groups(group_ids=sg)[0]

    def _collect(rules):
        # One entry per (rule, grant) pair, indexed from 0.
        out = {}
        index = 0
        for rule in rules:
            for grant in rule.grants:
                out[index] = {'IpProtocol': rule.ip_protocol,
                              'FromPort': rule.from_port,
                              'ToPort': rule.to_port,
                              'CidrIp': str(grant)}
                index += 1
        return out

    ingress = _collect(group.rules)
    egress = _collect(group.rules_egress)
    return egress, ingress
def terminate_all(self):
    """Terminate all instances for this cluster test.

    This function is a part of the public interface of provisioner.
    """
    regional_instances = self._get_all_instances(group_by_region=True)
    if not regional_instances:
        print("No running instances to terminate.")
        return
    for region, instances in regional_instances.items():
        if not instances:
            print("No running instances for test in region %s." % region)
            continue
        # BUG FIX: py2-only "print x," replaced with an explicit write so
        # "done." still appears on the same output line.
        sys.stdout.write("Will terminate %d instance(s) in %s... "
                         % (len(instances), region))
        sys.stdout.flush()
        conn = ec2.connect_to_region(region,
                                     aws_access_key_id=self._access_key_id,
                                     aws_secret_access_key=self._secret_access_key)
        conn.terminate_instances([i.id for i in instances])
        print("done.")
    self._delete_s3_content(self.test_name)
def _aws_connection(_region, _access_key, _secret_key, _token, _conn_type):
    """Return a boto connection of the requested type for *_region*.

    Supported _conn_type values: 'ec2', 'autoscale', 'cloudwatch',
    'cloudformation', 's3' (region-less), 'elb', 'vpc'. Returns None for
    'iam' and for any unrecognized type. Certificate validation follows
    the module-level 'validate_certs' flag.
    """
    # BUG FIX: the body previously dispatched on the undefined name
    # 'conn_type' instead of the '_conn_type' parameter, which raised
    # NameError unless a same-named global happened to exist.
    conn = None
    if _conn_type == 'ec2':
        conn = ec2.connect_to_region(
            _region, aws_access_key_id=_access_key,
            aws_secret_access_key=_secret_key, security_token=_token)
    elif _conn_type == 'autoscale':
        conn = ec2.autoscale.connect_to_region(
            _region, aws_access_key_id=_access_key,
            aws_secret_access_key=_secret_key, security_token=_token)
    elif _conn_type == 'cloudwatch':
        conn = ec2.cloudwatch.connect_to_region(
            _region, aws_access_key_id=_access_key,
            aws_secret_access_key=_secret_key, security_token=_token)
    elif _conn_type == 'cloudformation':
        conn = boto.cloudformation.connect_to_region(
            _region, aws_access_key_id=_access_key,
            aws_secret_access_key=_secret_key, security_token=_token)
    elif _conn_type == 's3':
        conn = boto.connect_s3(
            # Don't specify region when connecting to S3
            aws_access_key_id=_access_key,
            aws_secret_access_key=_secret_key, security_token=_token)
    elif _conn_type == 'elb':
        conn = ec2.elb.connect_to_region(
            _region, aws_access_key_id=_access_key,
            aws_secret_access_key=_secret_key, security_token=_token)
    elif _conn_type == 'vpc':
        conn = vpc.connect_to_region(
            _region, aws_access_key_id=_access_key,
            aws_secret_access_key=_secret_key, security_token=_token)
    elif _conn_type == 'iam':
        return None
    if conn:
        conn.https_validate_certificates = validate_certs
    return conn
def discover(self, regions):
    """Return {sg_id: {'vpc_id', 'name', 'tags'}} for every security group
    in each of *regions*. Exits the process if credentials are missing."""
    objects = {}
    try:
        AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY = self.find_aws_creds()
    # BUG FIX: narrowed from a bare except, which also swallowed
    # KeyboardInterrupt/SystemExit.
    except Exception:
        logger.info("Error: {0}: Cannot retrieve boto credentials".format('self.discover'))
        sys.exit()
    for r in regions:
        conn_ec2 = connect_to_region(r,
                                     aws_access_key_id=AWS_ACCESS_KEY_ID,
                                     aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
        for sg in conn_ec2.get_all_security_groups():
            # Egress/Ingress rules are also available here (sg.rules_egress
            # / sg.rules) without another API call, should callers need them.
            # BUG FIX: the old try/except KeyError blocks around attribute
            # access could never trigger (attribute access raises
            # AttributeError); getattr defaults express the intent.
            objects[sg.id] = {
                'vpc_id': getattr(sg, 'vpc_id', 'Empty'),
                'name': getattr(sg, 'name', 'Empty'),
                'tags': getattr(sg, 'tags', {}),
            }
    return objects
def get_by_tagvalue(cls, tags=None, region=None):
    """
    Connect to AWS and get the EC2 instance with the given tag:value pairs.

    :param tags: dict of tag-name -> value pairs that must all match
        (ANDed together as ec2 filters). Defaults to no filtering.
    :param region: optional; defaults to awsfab_settings.DEFAULT_REGION.
    :raise Ec2RegionConnectionError: If connecting to the region fails.
    :raise LookupError: If no matching instance was found in the region.
    :return: A list of :class:`Ec2InstanceWrapper`s containing the
        matching instances.
    """
    # BUG FIX: mutable default argument {} replaced with a None sentinel.
    if tags is None:
        tags = {}
    # BUG FIX: "region is None and DEFAULT or region" mis-evaluates when
    # DEFAULT_REGION is falsy; use an explicit check.
    if region is None:
        region = awsfab_settings.DEFAULT_REGION
    connection = connect_to_region(region_name=region, **awsfab_settings.AUTH)
    if not connection:
        raise Ec2RegionConnectionError(region)

    # BUG FIX: iteritems() is py2-only; items() works everywhere.
    tags = dict((('tag:%s' % oldk, v) for (oldk, v) in tags.items()))
    reservations = connection.get_all_instances(filters=tags)
    if len(reservations) == 0:
        raise LookupError('No ec2 instances with tags{0}'.format(tags))
    insts = []
    for r in reservations:
        for instance in r.instances:
            insts.append(cls(instance))
    return insts
def aws_provisioner(access_key, secret_access_token, keyname, region, zone,
                    security_groups):
    """
    Create an IProvisioner for provisioning nodes on AWS EC2.

    :param bytes access_key: The access_key to connect to AWS with.
    :param bytes secret_access_token: The corresponding secret token.
    :param bytes region: The AWS region in which to launch the instance.
    :param bytes zone: The AWS zone in which to launch the instance.
    :param bytes keyname: The name of an existing ssh public key configured
        in AWS. The provision step assumes the corresponding private key is
        available from an agent.
    :param list security_groups: List of security groups to put created
        nodes in.
    :raise ValueError: if *region* is not a valid AWS region.
    """
    conn = connect_to_region(
        region,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_access_token,
    )
    # BUG FIX: connect_to_region returns None for unknown regions; fail
    # loudly here (as the session-token variant of this factory elsewhere
    # in this file does) rather than with an AttributeError later.
    if conn is None:
        raise ValueError("Invalid region: {}".format(region))
    return AWSProvisioner(
        _connection=conn,
        _keyname=keyname,
        _security_groups=security_groups,
        _zone=zone,
        _default_size=b"m3.large",
    )
def getLongRunningEC2Instances(region, awsAccessKeyId, awsSecretAccessKey,
                               instanceAge):
    """Return EC2 instances running for more than *instanceAge* days.

    region : AWS region to query
    awsAccessKeyId : aws_access_key_id
    awsSecretAccessKey : aws_secret_access_key
    instanceAge : threshold in whole days

    e.g.
    getLongRunningEC2Instances("us-west-2",
      os.environ["AWS_ACCESS_KEY_ID"],
      os.environ["AWS_SECRET_ACCESS_KEY"], 15)
    """
    ec2Conn = ec2.connect_to_region(
        region_name=region,
        aws_access_key_id=awsAccessKeyId,
        aws_secret_access_key=awsSecretAccessKey)
    nowutc = datetime.now(tzutc())
    # Keep only running instances whose uptime exceeds the threshold.
    return [
        instance for instance in ec2Conn.get_only_instances()
        if instance.state == "running"
        and (nowutc - dateutil.parser.parse(instance.launch_time)).days > instanceAge
    ]
def ec2_client(region, zone, access_key_id, secret_access_key):
    """
    Establish connection to EC2 client.

    :param str region: The name of the EC2 region to connect to.
    :param str zone: The zone for the EC2 region to connect to.
    :param str access_key_id: "aws_access_key_id" credential for EC2.
    :param str secret_access_key: "aws_secret_access_key" EC2 credential.

    :return: An ``_EC2`` giving information about EC2 client connection
        and EC2 instance zone.
    """
    # Configure two Boto retry knobs (both set to BOTO_NUM_RETRIES):
    #  * num_retries: automatic exponential backoff when an EC2 API call
    #    fails with RequestLimitExceeded under system load.
    #  * metadata_service_num_attempts: retries when fetching IAM-role
    #    credentials from the instance Metadata Service.
    if not config.has_section('Boto'):
        config.add_section('Boto')
    config.set('Boto', 'num_retries', BOTO_NUM_RETRIES)
    config.set('Boto', 'metadata_service_num_attempts', BOTO_NUM_RETRIES)

    # Open the Boto EC2 connection, wrapped so that EC2ResponseError is
    # logged via Eliot.
    raw_connection = ec2.connect_to_region(
        region,
        aws_access_key_id=access_key_id,
        aws_secret_access_key=secret_access_key)
    return _EC2(zone=zone,
                connection=_LoggedBotoConnection(connection=raw_connection))
def get_connection(self):
    """Return an EC2 connection for self._region via an assumed STS role.

    When a cross-account number and role are configured, assumes that role
    through STS and connects with the temporary credentials; otherwise
    returns None. Raises BackupMonkeyException when the role assumption or
    connection fails.
    """
    ret = None
    if self._cross_account_number and self._cross_account_role:
        self._info(
            subject=_status.parse_status(
                'cross_account_connect',
                (self._cross_account_number, self._cross_account_role, self._region)),
            src_account=self._cross_account_number,
            src_role=self._cross_account_role,
            category='connection')
        from boto.sts import STSConnection
        import boto
        try:
            role_arn = 'arn:aws:iam::%s:role/%s' % (
                self._cross_account_number, self._cross_account_role)
            sts = STSConnection()
            assumed_role = sts.assume_role(role_arn=role_arn,
                                           role_session_name='AssumeRoleSession')
            ret = ec2.connect_to_region(
                self._region,
                aws_access_key_id=assumed_role.credentials.access_key,
                aws_secret_access_key=assumed_role.credentials.secret_key,
                security_token=assumed_role.credentials.session_token
            )
        # BUG FIX: "except BotoServerError, e" is py2-only syntax.
        except BotoServerError as e:
            raise BackupMonkeyException(
                '%s: %s' % (_status.parse_status('cross_account_error'), e.message),
                subject=_status.parse_status('cross_account_error'),
                body=e.message,
                src_account=self._cross_account_number,
                src_role=self._cross_account_role,
                category='connection')
    # BUG FIX: the connection was assigned but never returned.
    return ret
def echo_ip():
    """
    echo_ip - report Public IP of AWS Packaging Server - run local
    :return:
    """
    ec2conn = connect_to_region('us-west-2',
                                aws_access_key_id=AWS_ACCESS_KEY_ID,
                                aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
    # Walk every reservation's instances looking for the packaging server.
    for reservation in ec2conn.get_all_instances():
        for instance in reservation.instances:
            if instance.id == AWS_INSTANCE_ID:
                pprint(instance.ip_address)
def create_snapshots(backup_conf):
    """Take an EBS snapshot for every volume described in *backup_conf*.

    Each entry maps a volume id to a params dict with optional keys
    'interval' (default 'daily'), 'max_snapshots' (default 0, unlimited)
    and 'name' (default '').
    """
    ec2_connection = ec2.connect_to_region(
        aws_region,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key)
    # BUG FIX: iteritems() is py2-only; items() works everywhere.
    for volume, params in backup_conf.get().items():
        logging.info(
            kayvee.formatLog(
                "ebs-snapshots",
                "info",
                "about to take ebs snapshot {} - {}".format(volume, params)))
        interval = params.get('interval', 'daily')
        max_snapshots = params.get('max_snapshots', 0)
        name = params.get('name', '')
        snapshot_manager.run(ec2_connection, volume, interval, max_snapshots, name)
def _get_conn(region):
    """Return an EC2 connection for *region*.

    Credentials come from the AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY
    environment variables; raises ValueError when either is missing.
    """
    ### TODO: cache connection
    try:
        key_id = environ['AWS_ACCESS_KEY_ID']
        secret = environ['AWS_SECRET_ACCESS_KEY']
    except KeyError:
        raise ValueError('Please set your environment variables: '
                         'AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY')
    return ec2.connect_to_region(region,
                                 aws_access_key_id=key_id,
                                 aws_secret_access_key=secret)
def ec2_connect(access_key, secret_key, region):
    """ Connects to EC2, returns a connection object (None on failure) """
    try:
        conn = ec2.connect_to_region(region,
                                     aws_access_key_id=access_key,
                                     aws_secret_access_key=secret_key)
    except Exception as e:
        # BUG FIX: the format string has two %s placeholders but only the
        # region was supplied, so this handler itself raised TypeError;
        # bind the exception and pass both values.
        sys.stderr.write('Could not connect to region: %s. Exception: %s\n'
                         % (region, e))
        conn = None
    return conn
def get_instance(self, region, instance_id):
    '''Gets details about a specific instance'''
    conn = ec2.connect_to_region(region)
    # connect_to_region returns None (no exception) for a bad or
    # unsupported region name.
    if conn is None:
        print(
            "region name: %s likely not supported, or AWS is down. connection to region failed." % region)
        sys.exit(1)
    for reservation in conn.get_all_instances([instance_id]):
        for instance in reservation.instances:
            return instance
def launch_instance():
    """Launch the base AMI in eu-west-1 and block until it is running.

    Returns a (connection, instance) pair.
    """
    conn = connect_to_region('eu-west-1')
    reservation = conn.run_instances(BASE_AMI_ID,
                                     key_name=KEYPAIR,
                                     instance_type=INSTANCE_TYPE,
                                     security_groups=SECURITY_GROUPS)
    instance = reservation.instances[0]
    # Poll every 10s until EC2 reports the instance as running.
    while instance.state != 'running':
        print('Waiting for instance to start')
        sleep(10)
        instance.update()
    print('Instance started')
    return conn, instance
def __init__(self):
    # TODO load ec2 credentials from config file
    ec2_creds = config.get_ec2_conf()
    self.conn = None
    self.access_key = ec2_creds['AWS_ACCESS_KEY_ID']
    self.secret_key = ec2_creds['AWS_SECRET_ACCESS_KEY']
    self.region = 'us-east-1'
    self.account_id = ec2_creds['ACCOUNT_ID']
    # create a connection to aws to pass around
    self.conn = connect_to_region(self.region,
                                  aws_access_key_id=self.access_key,
                                  aws_secret_access_key=self.secret_key)
    self.SecurityGroupRule = config.SecurityGroupRule
def launch_EC2_instance(): conn = ec2.connect_to_region('us-east-1') # Key pair creation and save key-pair if os.path.isfile('./MyKeyPairNew.pem') is False: kp = conn.create_key_pair('MyKeyPairNew') kp.save('./') # Create security group for HTTP, SSH and Pinging security_groups = -1 try: security_groups = conn.get_all_security_groups(groupnames = ['csc326-groupg326-1-007']) security_groups = security_groups[0] except: security_groups = conn.create_security_group('csc326-groupg326-1-007', 'CSC326 project group g326-1-007') # To ping the server # To allow SSH # To allow HTTP security_groups.authorize('ICMP', from_port=-1, to_port=-1, cidr_ip = '0.0.0.0/0') security_groups.authorize('TCP', from_port=22, to_port=22, cidr_ip = '0.0.0.0/0') security_groups.authorize('TCP', from_port=80, to_port=80, cidr_ip = '0.0.0.0/0') # Get security groups sec_groups = conn.get_all_security_groups() #print sec_groups[0] # Launch a new instance: res = conn.run_instances('ami-b93164ae', key_name='MyKeyPairNew', instance_type = 't2.micro', security_groups=sec_groups) # allocate a static ip address to the instance addrs = conn.get_all_addresses() addr = 0 if len(addrs) == 0: addr = conn.allocate_address() else: addr = addrs[0] instances = res.instances inst = instances[0] print "Instance status: " + inst.state while inst.state != "running": time.sleep(10) inst.update() addr.associate(instance_id=inst.id) return addr, inst.id
def wait_for_cluster_state(conn, opts, cluster_instances, cluster_state):
    """
    Wait for all the instances in the cluster to reach a designated state.

    cluster_instances: a list of boto.ec2.instance.Instance
    cluster_state: a string representing the desired state of all the instances in the cluster
           value can be 'ssh-ready' or a valid value from boto.ec2.instance.InstanceState such as
           'running', 'terminated', etc.
           (would be nice to replace this with a proper enum: http://stackoverflow.com/a/1695250)
    """
    sys.stdout.write(
        "Waiting for cluster to enter '{s}' state.".format(s=cluster_state))
    sys.stdout.flush()
    start_time = datetime.now()
    num_attempts = 0
    # NOTE(review): this overwrites the `conn` argument with a fresh
    # connection to opts.region -- the caller's connection is ignored.
    conn = ec2.connect_to_region(opts.region)
    while True:
        # Linear backoff: wait 0s, 5s, 10s, ... between polls.
        time.sleep(5 * num_attempts)  # seconds
        # Refresh each instance's cached state before checking it.
        for i in cluster_instances:
            i.update()
        statuses = conn.get_all_instance_status(
            instance_ids=[i.id for i in cluster_instances])
        if cluster_state == 'ssh-ready':
            # 'ssh-ready' additionally requires both EC2 status checks to
            # report 'ok' and an actual SSH connection to succeed.
            if all(i.state == 'running' for i in cluster_instances) and \
               all(s.system_status.status == 'ok' for s in statuses) and \
               all(s.instance_status.status == 'ok' for s in statuses) and \
               is_cluster_ssh_available(cluster_instances, opts):
                break
        else:
            if all(i.state == cluster_state for i in cluster_instances):
                break
        num_attempts += 1
        sys.stdout.write(".")
        sys.stdout.flush()
    sys.stdout.write("\n")
    end_time = datetime.now()
    print "Cluster is now in '{s}' state. Waited {t} seconds.".format(
        s=cluster_state, t=(end_time - start_time).seconds)
def create_snapshots(backup_conf):
    """Evaluate and create EBS snapshots for each configured volume."""
    snapshot_conn = ec2.connect_to_region(aws_region)
    backup_client = boto3.client("ec2", region_name=aws_backup_region)
    for vol_id, vol_params in backup_conf.get().iteritems():
        logging.info(kayvee.formatLog(
            "ebs-snapshots", "info",
            "about to evaluate ebs snapshots for {} - {}".format(
                vol_id, vol_params),
            data={}))
        snapshot_manager.run(snapshot_conn,
                             backup_client,
                             vol_id,
                             vol_params.get('interval', 'daily'),
                             vol_params.get('max_snapshots', 0),
                             vol_params.get('name', ''))
def provision_spot(regionName, num):
    """Request *num* cr1.8xlarge spot instances in *regionName* at $1.50/hr."""
    global AMIs
    conn = ec2.connect_to_region(regionName)
    try:
        conn.create_placement_group(args.placement_group)
    except:
        # Best effort: the placement group usually already exists on
        # repeat runs, so the failure is only reported.
        print "Placement group exception " + args.placement_group
    reservations = conn.request_spot_instances(
        1.5,
        AMIs[regionName],
        count=num,
        instance_type="cr1.8xlarge",
        key_name="kaiju",
        placement_group=args.placement_group)
# NOTE(review): the triple-quote below appears to open a commented-out
# section continuing past this snippet -- confirm against the full file.
'''
def _aws_connection(_region, _access_key, _secret_key, _token, _conn_type): conn = None if conn_type == 'ec2': conn = ec2.connect_to_region(_region, aws_access_key_id=_access_key, aws_secret_access_key=_secret_key, security_token=_token) elif conn_type == 'autoscale': conn = ec2.autoscale.connect_to_region( _region, aws_access_key_id=_access_key, aws_secret_access_key=_secret_key, security_token=_token) elif conn_type == 'cloudwatch': conn = ec2.cloudwatch.connect_to_region( _region, aws_access_key_id=_access_key, aws_secret_access_key=_secret_key, security_token=_token) elif conn_type == 'cloudformation': conn = boto.cloudformation.connect_to_region( _region, aws_access_key_id=_access_key, aws_secret_access_key=_secret_key, security_token=_token) elif conn_type == 's3': conn = boto.connect_s3( # Don't specify region when connecting to S3 aws_access_key_id=_access_key, aws_secret_access_key=_secret_key, security_token=_token) elif conn_type == 'elb': conn = ec2.elb.connect_to_region( _region, aws_access_key_id=_access_key, aws_secret_access_key=_secret_key, security_token=_token) elif conn_type == 'vpc': conn = vpc.connect_to_region(_region, aws_access_key_id=_access_key, aws_secret_access_key=_secret_key, security_token=_token) elif conn_type == 'iam': return None if conn: conn.https_validate_certificates = validate_certs return conn
def stop(self): conn = ec2.connect_to_region(self.region) master_nodes, slave_nodes = get_existing_cluster(conn, self.opts, self.cluster_name, die_on_error=False) print "Stopping master..." for inst in master_nodes: if inst.state not in ["shutting-down", "terminated"]: inst.stop() print "Stopping slaves..." for inst in slave_nodes: if inst.state not in ["shutting-down", "terminated"]: if inst.spot_instance_request_id: inst.terminate() else: inst.stop()
def getProdNodes(region="us-west-1"):
    """Return collectd hostnames for the app-prod-1 / app-prod-2 instances.

    Args:
        region: AWS region to query (default "us-west-1", matching the
            previously hard-coded value).

    Returns:
        list of collectd hostname strings, one per matching running instance.
    """
    # BUG FIX: the `region` parameter was ignored (connection hard-coded to
    # 'us-west-1') and the filter was built in boto3 syntax but never
    # passed to the API call.
    conn = ec2.connect_to_region(region)
    # boto2 filter syntax: only consider running instances.
    reservations = conn.get_all_instances(
        filters={'instance-state-name': 'running'})
    instances = [i for r in reservations for i in r.instances]
    ret = []
    for instance in instances:
        # .tags.get avoids a KeyError on instances with no Name tag.
        if instance.tags.get('Name') in ("app-prod-1", "app-prod-2"):
            bname = instance.tags['Name']  # bname is short for beanstalk name
            iname = instance.id.rsplit('-', 1)[1]  # iname is short for instance id name
            hostname = 'collectd-' + bname + '-' + iname + "-p-usw1-local-com"
            ret.append(hostname)
    return ret
def _get_conn(region): """Return EC2 connection Args: region: region to connect to """ try: aws_access_key_id = environ['AWS_ACCESS_KEY_ID'] aws_secret_access_key = environ['AWS_SECRET_ACCESS_KEY'] except KeyError: raise ValueError('Please set your environment variables: ' 'AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY') if region not in _get_conn.connections: _get_conn.connections[region] = ec2.connect_to_region( region, aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY) return _get_conn.connections[region]
def main():
    """Ansible module: add security groups to an EC2 instance's group set."""
    module = AnsibleModule(
        argument_spec=dict(
            ec2_id=dict(required=True),
            group_names=dict(required=True),
            region=dict(required=True)))
    connection = connect_to_region(module.params.get("region"))
    ec2_id = module.params.get("ec2_id")
    group_names = module.params.get("group_names")
    # Groups the caller wants attached, plus those already on the instance.
    requested_ids = {group.id
                     for group in connection.get_all_security_groups(group_names)}
    attached = connection.get_instance_attribute(ec2_id, "groupSet")["groupSet"]
    current_ids = {group.id for group in attached}
    if connection.modify_instance_attribute(ec2_id, "groupSet",
                                            current_ids.union(requested_ids)):
        current_groups = connection.get_instance_attribute(
            ec2_id, "groupSet")["groupSet"]
        module.exit_json(changed=True,
                         groups=[group.id for group in current_groups])
    else:
        module.fail_json(msg="Could not update groups")
def get_instance_ips():
    """Return private IPs of running dockerhost instances for this environment."""
    conn = ec2.connect_to_region('eu-west-1')
    tag_name = '{} PaaS-dockerhosts'.format(environment)
    reservations = conn.get_all_instances(filters={
        'tag:Name': tag_name,
        'instance-state-name': 'running'
    })
    return [instance.__dict__['private_ip_address']
            for reservation in reservations
            for instance in reservation.instances]
def create(self):
    """Create the key pair (replacing a duplicate) and save the private key."""
    conn = ec2.connect_to_region(
        self.region,
        aws_access_key_id=self.access_key,
        aws_secret_access_key=self.secret_key
    )
    try:
        keypair = conn.create_key_pair(self.key_name)
    except EC2ResponseError as e:
        if e.error_code != 'InvalidKeyPair.Duplicate':
            raise e
        # A key pair by this name already exists: delete and recreate it.
        conn.delete_key_pair(key_name=self.key_name)
        keypair = conn.create_key_pair(self.key_name)
    keypair.save(self.user_path)
def get_instance(self, region, instance_id):
    ''' Gets details about a specific instance '''
    if self.eucalyptus:
        conn = boto.connect_euca(self.eucalyptus_host)
        conn.APIVersion = '2010-08-31'
    else:
        conn = ec2.connect_to_region(region)
    # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
    if conn is None:
        print(("region name: %s likely not supported, or AWS is down. connection to region failed." % region))
        sys.exit(1)
    # Return the first (and only) instance matching the requested id.
    for reservation in conn.get_all_instances([instance_id]):
        for instance in reservation.instances:
            return instance
def get_ec2_connection(aws_access_key_id, aws_secret_access_key, region_name):
    """
    :param aws_access_key: if None, we will use the .aws/config on this system
    :param aws_secret_key: if None we wil use the .aws/config on this system
    :param region_name: This is a region string i.e. us-east-1
    :return: an ec2 connection object
    :rtype: EC2Connection
    """
    connection = ec2.connect_to_region(
        region_name=region_name,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key)
    # connect_to_region returns None on bad region/credentials.
    if connection is None:
        raise Exception(
            "Could not get ec2 connection to region {}, invalid credentials.".
            format(region_name))
    return connection
def get_instances_by_region(self, region):
    ''' Makes an AWS EC2 API call to the list of instances in a particular region '''
    try:
        # Pull the session token for the configured profile from the
        # standard AWS credential/config files.
        cfg = Config()
        cfg.load_credential_file(os.path.expanduser("~/.aws/credentials"))
        cfg.load_credential_file(os.path.expanduser("~/.aws/config"))
        session_token = cfg.get(self.boto_profile, "aws_session_token")
        conn = ec2.connect_to_region(region,
                                     security_token=session_token,
                                     profile_name=self.boto_profile)
        # connect_to_region will fail "silently" by returning None if the
        # region name is wrong or not supported
        if conn is None:
            print("region name: {} likely not supported, or AWS is down. "
                  "connection to region failed.".format(region))
            sys.exit(1)
        reservations = conn.get_all_instances(filters=self.filters)
        bastion_ip = self.find_bastion_box(conn)
        instances = []
        for reservation in reservations:
            instances.extend(reservation.instances)

        # sort the instance based on name and index, in this order
        def sort_key(instance):
            name = instance.tags.get('Name', '')
            return "{}-{}".format(name, instance.id)
        for instance in sorted(instances, key=sort_key):
            self.add_instance(bastion_ip, instance, region)
    except boto.provider.ProfileNotFoundError as e:
        # Missing profile: tell the user exactly how to create it.
        raise Exception(
            "{}, configure it with 'aws configure --profile {}'".format(
                e.message, self.boto_profile))
    except boto.exception.BotoServerError as e:
        print(e)
        sys.exit(1)
def wait_volume(conn, volume, expected_status):
    """Poll until *volume* reaches *expected_status* or ~60 seconds elapse.

    Args:
        conn: EC2 connection used for polling.
        volume: boto volume object (only .id is used).
        expected_status: status string to wait for, e.g. 'available'.

    Raises:
        Exception: if the volume never reaches the expected status.
    """
    volume_status = 'waiting'
    sleep_seconds = 2
    sleep_intervals = 30
    for counter in range(sleep_intervals):
        print('waiting for volume - elapsed: %s. status: %s.' % (
            sleep_seconds * counter, volume_status))
        # BUG FIX: previously re-connected to a hard-coded 'us-east-1' on
        # every iteration, ignoring (and shadowing) the connection passed
        # by the caller.
        volume_status = conn.get_all_volumes(volume_ids=[volume.id])[0].status
        if volume_status == expected_status:
            break
        time.sleep(sleep_seconds)
    if volume_status != expected_status:
        raise Exception('Unable to get %s status for volume %s'
                        % (expected_status, volume.id))
    print('volume now in %s state' % expected_status)
def start(self): conn = ec2.connect_to_region(self.region) master_nodes, slave_nodes = get_existing_cluster(conn, self.opts, self.cluster_name, die_on_error=False) print "Starting slaves..." for inst in slave_nodes: if inst.state not in ["shutting-down", "terminated"]: inst.start() print "Starting master..." for inst in master_nodes: if inst.state not in ["shutting-down", "terminated"]: inst.start() wait_for_cluster_state(cluster_instances=(master_nodes + slave_nodes), cluster_state='ssh-ready', opts=self.opts) setup_cluster(conn, master_nodes, slave_nodes, self.opts, False)
def get_all_instances(config: str, region: str) -> dict:
    """
    Function to return all ec2 instances in a particular region by a certain tag.
    :param config: The config type - refer to `tags.py` for the list of valid tags.
    :param region: The AWS region to get ec2 instances for
    :return: Collection of all instances for the region.
    """
    conn = connect_to_region(region,
                             aws_access_key_id=ACCESS_KEY_ID,
                             aws_secret_access_key=ACCESS_KEY)
    found = conn.get_only_instances(filters={"tag:config": config})
    return {
        'instances': [
            {'instance_id': inst.id,
             'type': inst.instance_type,
             'state': inst.state,
             'public_dns_name': inst.public_dns_name,
             'launch_time': inst.launch_time}
            for inst in found
        ]
    }
def get_instances(instance_ids):
    """ Return an Instance objects for the given instance ids

    @param instance_ids: Instance ids (list)
    @return: Instance objects (dict)
    """
    instances = dict()
    conn = connect_to_region(REGION,
                             aws_access_key_id=AWS_KEY,
                             aws_secret_access_key=AWS_SECRET)
    try:
        reservations = conn.get_all_instances(instance_ids)
    except EC2ResponseError, ex:
        # On API error, report and return the (empty) dict collected so far.
        print 'Got exception when calling EC2 for instances (%s): %s' % \
            (", ".join(instance_ids), ex.error_message)
        return instances
    # NOTE(review): as shown here the success path never populates
    # `instances` from `reservations` and falls off the end returning
    # None -- the remainder of the body may be truncated; confirm upstream.
def from_boot_config(cls, boot_config, region=None):
    """Create an AWSAccount from a SimpleEnvironment or JujuData."""
    config = get_config(boot_config)
    euca_environ = get_euca_env(config)
    region = config["region"] if region is None else region
    client = ec2.connect_to_region(
        region,
        aws_access_key_id=euca_environ['EC2_ACCESS_KEY'],
        aws_secret_access_key=euca_environ['EC2_SECRET_KEY'])
    if client is None:
        # There is no point constructing a AWSAccount if client is None.
        # It can't do anything.
        log.info(
            'Failed to create ec2 client for region: {}.'.format(region))
        yield None
    else:
        yield cls(euca_environ, region, client)
def _get_or_create_security_group(aws_region,
                                  aws_access_key_id,
                                  aws_secret_access_key,
                                  security_group_name,
                                  security_group_description,
                                  authorization_tuples=None):
    """Fetch the named security group, creating it if it does not exist.

    Args:
        aws_region: region to connect to.
        aws_access_key_id / aws_secret_access_key: AWS credentials.
        security_group_name: name of the group to look up or create.
        security_group_description: description used when creating.
        authorization_tuples: iterable of (protocol, from_port, to_port,
            cidr) rules applied on creation; defaults to SSH from anywhere.

    Returns:
        The security group object.
    """
    # BUG FIX: the default was previously a mutable list literal shared
    # across calls; use None as sentinel instead.
    if authorization_tuples is None:
        authorization_tuples = [('tcp', 22, 22, '0.0.0.0/0')]
    conn = connect_to_region(aws_region,
                             aws_access_key_id=aws_access_key_id,
                             aws_secret_access_key=aws_secret_access_key)
    group = _get_security_group(conn, security_group_name)
    if not group:
        group = _create_security_group(conn, security_group_name,
                                       security_group_description,
                                       authorization_tuples)
    return group
def __init__(self, provider_config):
    """Read Amazon credentials from provider_config and open EC2/VPC connections."""
    try:
        self.config = provider_config["Amazon Credentials"]
    except KeyError:
        raise AwsLogicError("Required Values Amazon Credentials not there")
    if not self.config:
        # Empty credentials block: leave the connections unconfigured.
        return
    self.region = self.config["region"]
    self.access_key = self.config["accesskey"]
    self.secret_key = self.config["secretkey"]
    self.aws_conn = connect_to_region(
        self.region,
        aws_access_key_id=self.access_key,
        aws_secret_access_key=self.secret_key)
    self.vpc_conn = VPCConnection(
        aws_access_key_id=self.access_key,
        aws_secret_access_key=self.secret_key)
def main(): args = parsed_args() conn = ec2.connect_to_region(args.region) vols = conn.get_all_volumes(filters={'attachment.instance-id': args.instance_id}) matches = [x for x in vols if x.attach_data.device == args.mount_point] if (len(matches) == 1): code_volume = matches[0] else: raise Exception('No attached volume could be found for %s' % mount_point) print "found. the plan is to snapshot %s with tag %s" % (args.mount_point, args.tag) snap = code_volume.create_snapshot(snapshot_description(code_volume, args.instance_id)) snap.add_tag('Name', args.tag) print "done."
def terminate_num(tag, num):
    """Terminate *num* tagged instances in every known region.

    Args:
        tag: tag used by get_instances() to select candidate hosts.
        num: number of instances to terminate per region.

    If any region has fewer than *num* matching instances, nothing more is
    killed and the function returns early.
    """
    for regionName in AMIs.keys():
        allHosts = get_instances(regionName, tag)
        instance_ids = [h.instanceid for h in allHosts]
        if len(instance_ids) < num:
            pprint(
                "Only %d instances to cancel; %d requested; cowardly not killing"
                % (len(instance_ids), num))
            return
        instance_ids = instance_ids[:num]
        conn = ec2.connect_to_region(regionName)
        conn.terminate_instances(instance_ids)
        # BUG FIX: the two format arguments were previously split across two
        # pprint() call arguments, so '%d ... %s' % num raised TypeError.
        pprint("Terminated %d instances (%s)"
               % (num, ' '.join(instance_ids)))