def __init__(self, job_description, resource_url, pilot_compute_description):
    self.job_description = job_description
    logger.debug("URL: " + str(resource_url) + " Type: " + str(type(resource_url)))
    self.resource_url = saga.Url(str(resource_url))
    self.pilot_compute_description = pilot_compute_description
    self.id = "bigjob-" + str(uuid.uuid1())
    self.network_ip = None
    self.ec2_conn = None
    if self.resource_url.scheme == "euca+ssh" or self.resource_url.scheme == "nova+ssh":
        host = self.resource_url.host
        path = "/services/Eucalyptus"
        if self.resource_url.path is not None:
            path = self.resource_url.path
        port = 8773
        if self.resource_url.port is not None:
            port = self.resource_url.port
        region = None
        logger.debug("Host: %s, Path: %s, Port: %d" % (host, path, port))
        if self.resource_url.scheme == "euca+ssh":
            region = RegionInfo(name="eucalyptus", endpoint=host)
        elif self.resource_url.scheme == "nova+ssh":
            region = RegionInfo(name="openstack", endpoint=host)
        logger.debug("Access Key: %s Secret: %s" %
                     (self.pilot_compute_description["access_key_id"],
                      self.pilot_compute_description["secret_access_key"]))
        self.ec2_conn = EC2Connection(
            aws_access_key_id=self.pilot_compute_description["access_key_id"],
            aws_secret_access_key=self.pilot_compute_description["secret_access_key"],
            region=region,
            is_secure=False,
            port=port,
            path=path)
    else:
        aws_region = None
        if self.pilot_compute_description.has_key("region"):
            region = self.pilot_compute_description["region"]
            logger.debug("Connect to region: %s" % (str(region)))
            aws_region = boto.ec2.get_region(
                region,
                aws_access_key_id=self.pilot_compute_description["access_key_id"],
                aws_secret_access_key=self.pilot_compute_description["secret_access_key"])
        self.ec2_conn = EC2Connection(
            aws_access_key_id=self.pilot_compute_description["access_key_id"],
            aws_secret_access_key=self.pilot_compute_description["secret_access_key"],
            region=aws_region)
    self.instance = None
def main():
    (opts, source_region, source_image_id, image_name, image_arch) = parse_args()

    # Validate AMI
    conn = EC2Connection(region=ec2.get_region(source_region))
    image = conn.get_image(source_image_id)
    if not image.is_public:
        print >> stderr, ("Image %s is not public, no one will be able to "
                          "use it!" % source_image_id)
        sys.exit(1)

    if opts.files:
        if not os.path.exists(opts.base_directory):
            os.mkdir(opts.base_directory)

    for dest_region in DEST_REGIONS:
        try:
            region = ec2.get_region(dest_region)
            conn = EC2Connection(region=region, validate_certs=False)
        except Exception as e:
            print >> stderr, (e)
            sys.exit(1)
        new_image = conn.copy_image(source_region, source_image_id, image_name)
        print "Created new image: %s in %s" % (new_image.image_id, dest_region)
        if opts.files:
            dest_dir = os.path.join(opts.base_directory, dest_region)
            if not os.path.exists(dest_dir):
                os.mkdir(dest_dir)
            f = open(os.path.join(dest_dir, image_arch), 'w')
            f.write(new_image.image_id)
            f.close()
def __create_ec2conn(self):
    ec2conn = EC2Connection()
    regions = ec2conn.get_all_regions()
    for r in regions:
        if r.name == self.conf.get('ec2', 'region'):
            ec2conn = EC2Connection(region=r)
            return ec2conn
    return None
def conn(self):
    if not self.connection:
        if self.options.key and self.options.secret:
            self.connection = EC2Connection(self.options.key, self.options.secret)
        else:
            self.connection = EC2Connection()
    return self.connection
def get_public_dns(region=None):
    if region:
        conn = EC2Connection(AWS_KEY, AWS_SECRET, region=region)
    else:
        conn = EC2Connection(AWS_KEY, AWS_SECRET)
    print conn.region
    reservations = conn.get_all_instances()
    for x in reservations:
        print x.instances[0].public_dns_name, x.instances[0].private_dns_name
def create_client():
    client = EC2Connection(config.get('IAM', 'access'), config.get('IAM', 'secret'))
    regions = client.get_all_regions()
    for r in regions:
        if r.name == config.get('EC2', 'region'):
            client = EC2Connection(config.get('IAM', 'access'),
                                   config.get('IAM', 'secret'),
                                   region=r)
            return client
    return None
def main(argv=sys.argv[1:]):
    imagename = argv[0]
    try:
        s = SafeConfigParser()
        s3cfg = os.getenv("HOME") + "/.s3cfg"
        s.readfp(open(s3cfg, "r"))
        s3id = s.get("default", "access_key")
        pw = s.get("default", "secret_key")
        host_base = s.get("default", "host_base")
        use_https = s.getboolean("default", "use_https")
        hba = host_base.split(":", 1)
        if len(hba) == 2:
            port = int(hba[1])
        else:
            port = 8888
        host = hba[0]
    except Exception, ex:
        print "This program uses the s3cmd configuration file ~/.s3cfg"
        print ex
        sys.exit(1)
    print "getting connection"
    ec2conn = EC2Connection(s3id, pw, host='localhost', port=8444, debug=2)
    ec2conn.host = 'localhost'
    print "getting image"
    image = ec2conn.get_image(imagename)
    print "running"
    res = image.run(min_count=2, max_count=4)
    res.stop_all()
def setUp(self):
    host = 'localhost'
    cumport = 8888
    ec2port = 8444

    self.db = DB(pycb.config.authzdb)
    self.friendly = os.environ['NIMBUS_TEST_USER']
    self.can_user = User.get_user_by_friendly(self.db, self.friendly)
    s3a = self.can_user.get_alias_by_friendly(self.friendly,
                                              pynimbusauthz.alias_type_s3)
    x509a = self.can_user.get_alias_by_friendly(self.friendly,
                                                pynimbusauthz.alias_type_x509)

    self.subject = x509a.get_name()
    self.s3id = s3a.get_name()
    self.s3pw = s3a.get_data()
    self.s3user = s3a
    self.dnuser = x509a

    self.ec2conn = EC2Connection(self.s3id, self.s3pw, host=host, port=ec2port)
    self.ec2conn.host = host
    cf = OrdinaryCallingFormat()
    self.s3conn = S3Connection(self.s3id, self.s3pw, host=host, port=cumport,
                               is_secure=False, calling_format=cf)
    self.db.commit()
    self.killall_running()
def handle(self, *args, **options):
    if len(args) > 0:
        if args[0] == "create":
            conn = EC2Connection(AWS_ID, AWS_SECRET_ACCESS_KEY)
            for pending_ci_server in JenkinsServer.objects.filter(is_active=False):
                cur_git_repo = pending_ci_server.project_set.all()[0].git_repo
                if cur_git_repo.is_active:
                    reservation = conn.run_instances(
                        image_id='ami-03c1736a',
                        key_name='ciboxbuild',
                        instance_type='m1.small',
                        security_groups=['default'],
                        user_data="export github_repo=\"" + cur_git_repo.url + "\"")
                    found_instance = False
                    while not found_instance:
                        time.sleep(30)
                        for r in conn.get_all_instances():
                            found_instance = (r.id == reservation.id and
                                              r.instances[0].public_dns_name != "")
                    pending_ci_server.url = r.instances[0].public_dns_name
                    pending_ci_server.is_active = True
                    pending_ci_server.save()
        else:
            print("There were no valid arguments")
    else:
        print("There were no valid arguments")
def _connect_euca(self):
    if 'host' not in self.site_desc or 'port' not in self.site_desc:
        raise PhantomWebException("The site %s is misconfigured." % (self.cloudname))
    if self.site_desc['secure']:
        scheme = "https"
    else:
        scheme = "http"
    site_url = "%s://%s:%s" % (scheme, self.site_desc['host'],
                               str(self.site_desc['port']))
    kwargs = {}
    uparts = urlparse.urlparse(site_url)
    is_secure = uparts.scheme == 'https'
    if self.site_desc.get('path') is not None:
        kwargs['path'] = self.site_desc['path']
    ec2conn = EC2Connection(self.iaas_key, self.iaas_secret,
                            host=uparts.hostname, port=uparts.port,
                            is_secure=is_secure, validate_certs=False, **kwargs)
    ec2conn.host = uparts.hostname
    return ec2conn
def connect_euca(host, aws_access_key_id=None, aws_secret_access_key=None,
                 port=8773, path='/services/Eucalyptus', is_secure=False,
                 **kwargs):
    """
    Connect to a Eucalyptus service.

    :type host: string
    :param host: the host name or ip address of the Eucalyptus server

    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID

    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key

    :rtype: :class:`boto.ec2.connection.EC2Connection`
    :return: A connection to Eucalyptus server
    """
    from boto.ec2 import EC2Connection
    from boto.ec2.regioninfo import RegionInfo
    reg = RegionInfo(name='eucalyptus', endpoint=host)
    return EC2Connection(aws_access_key_id, aws_secret_access_key,
                         region=reg, port=port, path=path,
                         is_secure=is_secure, **kwargs)
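# A minimal usage sketch for connect_euca() above. The host name and the
# credentials are placeholder assumptions, not values from the original code;
# get_all_images() is a standard boto EC2Connection call that also works
# against the Eucalyptus EC2 front end.
def example_connect_euca():
    euca = connect_euca('euca-head.example.org',
                        aws_access_key_id='YOUR_ACCESS_KEY',
                        aws_secret_access_key='YOUR_SECRET_KEY')
    # List the images visible to this account on the Eucalyptus endpoint.
    for img in euca.get_all_images():
        print img.id, img.location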
def make_connection(credentials):
    """
    A general function to connect to a cloud provider endpoint using the EC2 API

    @param credentials: A dictionary containing ec2 specific parameters
        for connecting to the endpoint.
    @type credentials: dictionary

    @return A boto ec2 connection object
    """
    url = credentials['ec2_url']
    url_path = str()
    url_endpoint = url.split('/')[2]
    url_protocol = url.split('/')[0].split(':')[0]
    if url_protocol == "https":
        secure = True
    elif url_protocol == "http":
        secure = False
    if len(url.split(':')) > 2:
        url_port = url.split(':')[2].split('/')[0]
        url_path = url.split(url_port)[1]

    hs_region = EC2RegionInfo(name=credentials['ec2_region'],
                              endpoint=url_endpoint)
    conn = EC2Connection(aws_access_key_id=credentials['ec2_access_key'],
                         aws_secret_access_key=credentials['ec2_secret_key'],
                         is_secure=secure,
                         path=url_path,
                         region=hs_region)
    return conn
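# Hedged sketch of the credentials dictionary make_connection() above expects.
# The endpoint URL, region name, and key values are placeholders, not values
# taken from the original code.
EXAMPLE_EC2_CREDENTIALS = {
    'ec2_url': 'http://cloud.example.org:8773/services/Cloud',
    'ec2_region': 'RegionOne',
    'ec2_access_key': 'YOUR_ACCESS_KEY',
    'ec2_secret_key': 'YOUR_SECRET_KEY',
}
# conn = make_connection(EXAMPLE_EC2_CREDENTIALS)
# print conn.get_all_instances()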
def make_snapshot(key, access, cluster, name="main",
                  expiration='weekly', device="/dev/sdf"):
    # region, instance_id and expires are assumed to be module-level settings
    # in the original script.
    # first get the mountpoint (requires some energy, but we can...)
    df = subprocess.Popen(["/bin/df", device], stdout=subprocess.PIPE)
    output = df.communicate()[0]
    dummy, size, used, available, percent, mountpoint = \
        output.split("\n")[1].split()

    region_info = RegionInfo(name=region,
                             endpoint="ec2.{0}.amazonaws.com".format(region))
    ec2 = EC2Connection(key, access, region=region_info)

    # if we have the device (/dev/sdf) just don't do anything anymore
    mapping = ec2.get_instance_attribute(instance_id, 'blockDeviceMapping')
    try:
        volume_id = mapping['blockDeviceMapping'][device].volume_id
        os.system("/usr/sbin/xfs_freeze -f {0}".format(mountpoint))
        snapshot = ec2.create_snapshot(
            volume_id,
            "Backup of {0} - for {1}/{2} - expires {3}".format(
                volume_id, cluster, name, expires[expiration]))
        os.system("/usr/sbin/xfs_freeze -u {0}".format(mountpoint))
    except Exception as e:
        print e
    return ["{0}".format(snapshot.id), expires[expiration]]
def setup_ec2():
    AMI_NAME = 'ami-834cf1ea'  # Ubuntu 64-bit 12.04 LTS
    INSTANCE_TYPE = 'c1.medium'
    conn = EC2Connection(django_settings.AWS_ACCESS_KEY_ID,
                         django_settings.AWS_SECRET_ACCESS_KEY)
    reservation = conn.run_instances(AMI_NAME,
                                     instance_type=INSTANCE_TYPE,
                                     key_name=env.user,
                                     security_groups=['db-mongo'])
    instance = reservation.instances[0]
    print "Booting reservation: %s/%s (size: %s)" % (reservation, instance, INSTANCE_TYPE)
    i = 0
    while True:
        if instance.state == 'pending':
            print ".",
            sys.stdout.flush()
            instance.update()
            i += 1
            time.sleep(i)
        elif instance.state == 'running':
            print "...booted: %s" % instance.public_dns_name
            time.sleep(5)
            break
        else:
            print "!!! Error: %s" % instance.state
            return
    host = instance.public_dns_name
    env.host_string = host
def setUp(self):
    host = 'localhost'
    cumport = 8888
    ec2port = 8444

    self.db = DB(pycb.config.authzdb)
    self.friendly = self.cb_random_bucketname(21)
    self.can_user = User(self.db, friendly=self.friendly, create=True)
    self.subject = self.cb_random_bucketname(21)
    self.s3id = self.cb_random_bucketname(21)
    self.s3pw = self.cb_random_bucketname(42)
    self.s3user = self.can_user.create_alias(self.s3id,
                                             pynimbusauthz.alias_type_s3,
                                             self.friendly, self.s3pw)
    self.dnuser = self.can_user.create_alias(self.subject,
                                             pynimbusauthz.alias_type_x509,
                                             self.friendly)

    self.ec2conn = EC2Connection(self.s3id, self.s3pw, host=host,
                                 port=ec2port, debug=2)
    self.ec2conn.host = host
    cf = OrdinaryCallingFormat()
    self.s3conn = S3Connection(self.s3id, self.s3pw, host=host, port=cumport,
                               is_secure=False, calling_format=cf)
    self.db.commit()
def main(): print "Connecting to EC2" conn = EC2Connection(accessKeyId, accessKeySecret) print "Connected to EC2" reservations = conn.get_all_instances() instance = find_target(target, reservations) hours_since_last_backup = find_hours_since_last_backup(conn, instance) if hours_since_last_backup and hours_since_last_backup < backup_interval_hours: print "Only {} hours passed since last backup, waiting until {} pass".format( hours_since_last_backup, backup_interval_hours) return backup_name = target + "_" + re.sub("[: .]", "-", str(datetime.datetime.now())) print "Backing up instance '{}' with id '{}' - snapshot {} will be created".format( instance, instance.id, backup_name) target_description = "Backup of " + target # TODO conn.create_image(instance.id, backup_name, target_description, True) delete_oldest_backups(conn, number_of_backups_to_keep, target_description) print "Done"
def searchEc2(name):
    try:
        from boto.ec2.connection import EC2Connection
        from boto import Version
        if Version < '2.9':
            raise Exception()
    except:
        print "\n Fatal: Python module/library boto >=2.9 is required for -e \n"
        sys.exit(2)
    if 'AWS_ACCESS_KEY_ID' not in os.environ or 'AWS_SECRET_ACCESS_KEY' not in os.environ:
        printc("please set env variable for AWS_ACCESS_KEY_ID and "
               "AWS_SECRET_ACCESS_KEY before using -e, like: ", RED)
        printc("    export AWS_ACCESS_KEY_ID=......", RED)
        printc("    export AWS_SECRET_ACCESS_KEY=....", RED)
        sys.exit(1)
    serverList = {}
    conn = EC2Connection()
    instances = conn.get_all_instances()
    for i in instances:
        for instance in i.instances:
            if instance.state != "running":
                continue
            tags = instance.tags
            if tags.has_key("Name") and name.search(tags["Name"]):
                if instance.public_dns_name == "":
                    serverList[instance.private_ip_address] = tags["Name"]
                else:
                    serverList[instance.public_dns_name] = tags["Name"]
    return serverList
def run(self):
    print(Fore.GREEN + self.name + ' : running' + Fore.RESET)
    conn = EC2Connection(credentials.EC2_ACCESS_ID, credentials.EC2_SECRET_KEY)
    node_name = 'worker_' + self.name
    instance = create_node(conn, node_name)
    instance.add_tag('job', JOB)
    print(self.name + ' : instance domain name is ' + instance.public_dns_name)
    #time.sleep(30)
    self.start_time = time.time()
    print(self.name + ':' + str(
        copy_file_to_instance(self.input_file_name, instance.public_dns_name, 'input.dna')))
    print(self.name + ':' + str(
        run_command_on_instance(
            'cp /home/ubuntu/iprscan/interproscan-5-RC1/interproscan.properties.' +
            str(self.processors) +
            ' /home/ubuntu/iprscan/interproscan-5-RC1/interproscan.properties',
            instance.public_dns_name)))
    print(self.name + ':' + str(
        run_command_on_instance(
            '/home/ubuntu/iprscan/interproscan-5-RC1/interproscan.sh '
            '-appl ProDom-2006.1,PfamA-26.0,TIGRFAM-10.1,SMART-6.2,Gene3d-3.3.0,Coils-2.2,Phobius-1.01 '
            '-i /home/ubuntu/input.dna -t n',
            instance.public_dns_name)))
    #print(self.name + ':' + str(run_command_on_instance('/home/ubuntu/iprscan/interproscan-5-RC1/interproscan.sh -appl PfamA-26.0,Coils-2.2,Phobius-1.01 -i /home/ubuntu/input.dna -t n', instance.public_dns_name)))
    print(self.name + ':' + str(
        copy_file_from_instance('input.dna.gff3', instance.public_dns_name,
                                self.input_file_name + '.out')))
    print(self.name + ' : destroying node')
    # instance.terminate()
    seconds = time.time() - self.start_time
    print(Fore.MAGENTA + self.name + ' : completed in ' + str(int(seconds)) + Fore.RESET)
def setUp(self):
    host = 'localhost'
    cumport = 8888
    ec2port = 8444
    try:
        ec2port = int(os.environ['NIMBUS_TEST_EC2_PORT'])
    except:
        pass
    try:
        cumport = int(os.environ['NIMBUS_TEST_S3_PORT'])
    except:
        pass

    self.db = DB(pycb.config.authzdb)
    self.friendly = self.cb_random_bucketname(21)
    self.can_user = User(self.db, friendly=self.friendly, create=True)
    self.subject = self.cb_random_bucketname(21)
    self.s3id = self.cb_random_bucketname(21)
    self.s3pw = self.cb_random_bucketname(42)
    self.s3user = self.can_user.create_alias(self.s3id,
                                             pynimbusauthz.alias_type_s3,
                                             self.friendly, self.s3pw)
    self.dnuser = self.can_user.create_alias(self.subject,
                                             pynimbusauthz.alias_type_x509,
                                             self.friendly)

    self.ec2conn = EC2Connection(self.s3id, self.s3pw, host=host,
                                 port=ec2port, debug=2)
    self.ec2conn.host = host
    cf = OrdinaryCallingFormat()
    self.s3conn = S3Connection(self.s3id, self.s3pw, host=host, port=cumport,
                               is_secure=False, calling_format=cf)
    self.db.commit()

    nh = get_nimbus_home()
    groupauthz_dir = os.path.join(nh, "services/etc/nimbus/workspace-service/group-authz/")
    add_member(groupauthz_dir, self.subject, 4)
def setUp(self): """Setup our EC2 instance""" self.ec2 = EC2Connection() self.volume = self.ec2.create_volume(1, 'us-east-1a') self.snapshots = [] self.snapshots.append(self.volume.create_snapshot('rotatelib_backup20121110')) self.snapshots.append(self.volume.create_snapshot('rotatelib_backup'))
def create_aws_instance():
    '''Initialize an AWS instance'''
    print('Creating an AWS instance...')
    conn = EC2Connection()
    # start an instance of Ubuntu 10.04
    ami_ubuntu10_04 = conn.get_all_images(image_ids=['ami-3202f25b'])
    reservation = ami_ubuntu10_04[0].run(
        key_name='ec2-keypair',
        security_groups=['OCG_group'],
        instance_type='m1.large',
    )
    instance = reservation.instances[0]
    sleep(1)
    while instance.state != u'running':
        print("Instance state = {0}".format(instance.state))
        instance.update()
        sleep(5)
    print("Instance state = {0}".format(instance.state))
    sleep(5)
    # add a tag to name the instance
    instance.add_tag('Name', 'OpenClimateGIS')
    print("PUBLIC_DNS={0}".format(instance.dns_name))
    local('export PUBLIC_DNS={0}'.format(instance.dns_name))
    local('export INSTANCE_ID={0}'.format(instance.id))
    return instance.id
def connect_cloud():
    '''
    This function uses the ec2 API to connect to various cloud providers.
    Note that it only connects to one cloud at a time.

    @type cred_dict: dictionary
    @param cred_dict: A dictionary containing access, secret and connection urls.

    @return: NULL

    @todo: add support for other cloud providers
    '''
    global conn

    url = cred_dict['ec2_url']
    url_endpoint = url.split('/')[2]
    url_port = url.split(':')[2].split('/')[0]
    url_path = url.split(url_port)[1]
    url_protocol = url.split(":")[0]
    provider = cloud_provider(url)

    # A default region is required by boto for initiating connection.
    region_var = EC2RegionInfo(name="tmp.hadoopstack", endpoint=url_endpoint)

    if provider == "openstack":
        if url_protocol == "http":
            conn = EC2Connection(cred_dict['ec2_access_key'],
                                 cred_dict['ec2_secret_key'],
                                 region=region_var,
                                 is_secure=False,
                                 path=url_path)
            print conn
    return
def launchInstances(self):
    log.info('Starting {0} EC2 instances of type {1} with image {2}'.format(
        self.num_nodes, self.os, self.ami))
    try:
        conn = EC2Connection(self.aws_access_key_id, self.aws_secret_access_key)
        reservation = conn.run_instances(self.ami,
                                         max_count=self.num_nodes,
                                         key_name=self.key_name,
                                         security_groups=[self.security_groups],
                                         instance_type=self.instance_type,
                                         placement=self.zone)
        log.info('ReservationID: {0}'.format(reservation.id))
        log.info('Instances: {0}'.format(ManageEC2.get_instances(reservation)))
        # wait for instances to boot up
        self.wait_for_instances_to_boot(reservation)
        self.create_instance_tags(reservation, prefix=self.tags)
        if self.num_ebs > 0:
            ManageEC2.launchEBS(conn, reservation, self)
    finally:
        # note: returning from a finally block suppresses any exception raised above
        return conn, reservation
def _connect_ec2(self):
    ec2_region = self.site_desc.get("region")
    region = None  # no region configured: let boto fall back to its default region
    if ec2_region is not None:
        region = boto.ec2.get_region(ec2_region)
    ec2conn = EC2Connection(self.iaas_key, self.iaas_secret, region=region)
    return ec2conn
def __init__(self, config=None, connect=True):
    """
    :type config: str
    :param config: An alternative configuration file. The default is
        ./ec2helper.ini, relative to the script's directory.
    """
    if config is not None:
        conffile = config
    else:
        conffile = os.path.join(
            os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))),
            'ec2helper.ini')

    self.config = SafeConfigParser()
    self.config.optionxform = str  # make config case-sensitive
    try:
        self.config.read(conffile)
    except MissingSectionHeaderError:
        sys.exit('Malformed configuration file: %s' % conffile)

    aws_conf = self.get_conf('aws', required=False)
    if 'region' in aws_conf:
        aws_conf['region'] = boto.ec2.get_region(aws_conf['region'])
    if connect:
        try:
            self.ec2 = EC2Connection(**aws_conf)
        except NoAuthHandlerFound:
            sys.exit("Missing 'aws_access_key_id' or 'aws_secret_access_key'")
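# Hedged sketch of the ec2helper.ini layout the constructor above expects: an
# [aws] section whose options are passed straight through to EC2Connection as
# keyword arguments (aws_access_key_id, aws_secret_access_key, and optionally
# region). The values below are placeholders.
EXAMPLE_EC2HELPER_INI = """
[aws]
aws_access_key_id = YOUR_ACCESS_KEY
aws_secret_access_key = YOUR_SECRET_KEY
region = us-west-2
"""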
def launch_new():
    # conn (a module-level EC2Connection), num_workers, max_bid, ami, user_data,
    # key_name and security_groups are assumed to be defined at module scope.
    global conn
    for i in range(num_workers):
        try:
            instance_type = "m1.small"
            sr = conn.request_spot_instances(price=max_bid,
                                             image_id=ami,
                                             count=1,
                                             type='persistent',
                                             user_data=user_data,
                                             instance_type=instance_type,
                                             key_name=key_name,
                                             security_groups=security_groups)
            sp = sr[0]
            sp.add_tag("converterspot", "alt1")
            print sp, sp.tags
        except Exception as ex:
            print "request small failed"
            print "we will try to request a MEDIUM one"
            instance_type = "m1.medium"
            conn = EC2Connection(AWS_KEY, AWS_SECRET,
                                 region=get_region_info('sa-east-1'))
            sr = conn.request_spot_instances(price=max_bid,
                                             image_id=ami,
                                             count=1,
                                             type='persistent',
                                             user_data=user_data,
                                             instance_type=instance_type,
                                             key_name=key_name,
                                             security_groups=security_groups)
            sp = sr[0]
            sp.add_tag("converterspot", "alt1")
            print sp, sp.tags
def main():
    options = parseCommandLine()
    if options is None:
        return

    conn = EC2Connection()
    reservations = conn.get_all_instances()
    for reservation in reservations:
        for instance in reservation.instances:
            if instance.state != 'running':
                continue
            if 'stack' in instance.tags:
                stackName = instance.tags['stack']
                if options.stackName is None or stackName.lower() == options.stackName:
                    if not options.nodeName or \
                       ('Name' in instance.tags and
                        instance.tags['Name'].lower() == options.nodeName):
                        if len(instance.public_dns_name) > 0:
                            print instance.public_dns_name
                            sys.exit(0)

    print "error: unable to find instance matching stack-name '%s'" % options.stackName
    sys.exit(1)
def get_tags(agentConfig):
    if not agentConfig['collect_instance_metadata']:
        log.info("Instance metadata collection is disabled. Not collecting it.")
        return []

    socket_to = None
    try:
        socket_to = socket.getdefaulttimeout()
        socket.setdefaulttimeout(EC2.TIMEOUT)
    except Exception:
        pass

    try:
        iam_role = urllib2.urlopen(EC2.URL + "/iam/security-credentials").read().strip()
        iam_params = json.loads(urllib2.urlopen(
            EC2.URL + "/iam/security-credentials" + "/" + unicode(iam_role)).read().strip())
        from boto.ec2.connection import EC2Connection
        connection = EC2Connection(aws_access_key_id=iam_params['AccessKeyId'],
                                   aws_secret_access_key=iam_params['SecretAccessKey'],
                                   security_token=iam_params['Token'])
        instance_object = connection.get_only_instances([EC2.metadata['instance-id']])[0]
        EC2_tags = [u"%s:%s" % (tag_key, tag_value)
                    for tag_key, tag_value in instance_object.tags.iteritems()]
    except Exception:
        log.exception("Problem retrieving custom EC2 tags")
        EC2_tags = []

    try:
        if socket_to is None:
            socket_to = 3
        socket.setdefaulttimeout(socket_to)
    except Exception:
        pass

    return EC2_tags
def clean_backups():
    """
    dumb script that cleans up all the duplicate ebs snapshots
    our two cron servers create while backing up redis
    """
    conn = EC2Connection(*aws)
    snapshots = conn.get_all_snapshots()
    shots = defaultdict(list)
    for snapshot in conn.get_all_snapshots(owner=352407978521):
        if snapshot.tags.get('Name') is not None:
            t = snapshot.tags['Name']
            ttype = ""
            if 'Pink' in t:
                ttype = 'pink'
            elif 'Yellow' in t:
                ttype = 'yellow'
            dt = datetime.strptime(snapshot.start_time, "%Y-%m-%dT%H:%M:%S.000Z")
            key = (ttype, dt.year, dt.month, dt.day)
            val = snapshot.id
            shots[key].append(val)

    to_delete = []
    for k, v in shots.iteritems():
        if len(v) >= 2:
            to_delete.append(v[0])

    for d in to_delete:
        print "deleting", d, "..."
        conn.delete_snapshot(d)
def __init__(self, access_key, secret_key, ip_address=None):
    # save my ec2 connection info
    self.access_key = access_key
    self.secret_key = secret_key
    self.ec2conn = EC2Connection(access_key, secret_key)
    self.sshconn = None

    # match up the ip with an instance, if there is one
    if ip_address is not None:
        self.ip_address = ip_address
        self.instance_name = None
        reservations = self.ec2conn.get_all_instances()
        # find our instance, and check to see if its state is running
        instance_list = [i for r in reservations for i in r.instances]
        for i in instance_list:
            # you can't encode a NoneType, so you have to check this first
            if i.__dict__['ip_address'] is not None:
                if i.__dict__['ip_address'].encode('ascii', 'ignore') == self.ip_address:
                    self.instance_name = i.__dict__['id']
                    print self.instance_name
        if self.instance_name is None:
            print "there is no image at that ip address"
        public_dns = "ec2-%s-%s-%s-%s.compute-1.amazonaws.com" % (
            self.ip_address.split('.')[0],
            self.ip_address.split('.')[1],
            self.ip_address.split('.')[2],
            self.ip_address.split('.')[3])
        self.public_dns = public_dns.encode('ascii', 'ignore')