def validate_provider_data(self, data, files=None):  # NOQA
    """Validate AWS provider credentials supplied by the user.

    Delegates basic validation to the parent class, then verifies the
    access/secret key pair by making a cheap, read-only EC2 API call
    against the chosen region.

    Args:
        data: mapping containing the region, access key, and secret key
            (keyed by self.REGION / self.ACCESS_KEY / self.SECRET_KEY).
        files: unused here; accepted for interface compatibility.

    Returns:
        A dict mapping field names to lists of error messages; empty when
        validation succeeds.
    """
    errors = super(AWSCloudProvider, self).validate_provider_data(data, files)
    if errors:
        return errors

    # Check the authentication credentials: get_all_zones() is a cheap,
    # read-only call that fails iff the key pair is invalid for the region.
    try:
        ec2 = boto.ec2.connect_to_region(
            data[self.REGION],
            aws_access_key_id=data[self.ACCESS_KEY],
            aws_secret_access_key=data[self.SECRET_KEY])
        ec2.get_all_zones()
    except boto.exception.EC2ResponseError:
        # Fixed: `except E, e` is Python-2-only syntax and the bound
        # exception object was never used.
        err_msg = 'Unable to authenticate to AWS with the provided keys.'
        errors.setdefault(self.ACCESS_KEY, []).append(err_msg)
        errors.setdefault(self.SECRET_KEY, []).append(err_msg)
    # Fixed: previously the function fell off the end and implicitly
    # returned None, silently discarding the authentication errors
    # collected above.
    return errors
def createClusterDC(datacenter,region,zone,nodes,node_type,nb_seeds_per_dc,cluster_name,opscenter):
    """Launch the EC2 instances for one Cassandra cluster datacenter.

    Creates `nodes` instances in region `region`/`zone`, spreading them
    round-robin across the available AZs (or all in one AZ if only one is
    available).  The first node optionally hosts OpsCenter.

    Returns the cluster descriptor dict (with an 'instances' list of the
    launched instances), or None on any failure.
    """
    cluster = {'cluster':cluster_name,'datacenter':datacenter,'region':region,'zone':zone,'nodes':nodes,'node_type':node_type,'nb_seeds_per_dc':nb_seeds_per_dc,'instances':[],'opscenter':opscenter}
    try:
        # Connection
        ec2 = boto.ec2.connect_to_region(region+'-'+zone)
        # Collect the availability zones currently usable in this region.
        AZlist = ec2.get_all_zones()
        AZ = []
        for AZitem in AZlist:
            if AZitem.state == 'available':
                AZ.append(AZitem.name)
        if (len(AZ) == 0):
            # No available zone
            print "No available AZ !"
            return None
        # Look up the AMI, key name and security group for this region/zone.
        # NOTE: Python 2 filter() returns a list, so [0] indexing is valid;
        # raises IndexError if conf_HVM has no entry for this region/zone.
        info = filter(lambda x: ((x['region'] == region) and (x['zone'] == zone)), conf_HVM)[0]
        # Only one AZ available ?
        if (len(AZ) == 1):
            # we create all instances in the same AZ
            createInstance(ec2,info['ami'],nodes,AZ[0],node_type,info['key'],info['sg'],'--clustername '+cluster_name+' --totalnodes '+str(nodes)+' --version community')
        else:
            # we cycle Availability Zones
            iAZ = 0
            for i in range(nodes):
                if ((i == 0) and (opscenter == True)):
                    # Install OpsCenter on this first, master node.
                    instance = createInstance(ec2,info['ami'],1,AZ[iAZ],node_type,info['key'],info['sg'],'--clustername '+cluster_name+' --totalnodes 1 --version community')
                else:
                    # Node without opscenter
                    instance = createInstance(ec2,info['ami'],1,AZ[iAZ],node_type,info['key'],info['sg'],'--clustername '+cluster_name+' --opscenter no --totalnodes 1 --version community')
                # Build the console display name: "[DC]  M  name" for the
                # master, "[DC]  <n>  name" for the others.
                node_name = "["+datacenter+"] "
                if i == 0:
                    node_name += ' M '
                else:
                    node_name += ' '+str(i+1)+' '
                node_name += cluster_name
                # Brief pause so the instance exists before tagging it.
                time.sleep(5)
                instance.add_tag('Name', node_name)
                instance.add_tag('Cluster', cluster_name)
                cluster['instances'].append({'instance':instance,'index':str(i+1),'creation':datetime.datetime.now(),'region':region,'zone':zone,'datacenter':datacenter,'AZ':AZ[iAZ]})
                # Advance round-robin over the available AZs.
                iAZ += 1
                if (iAZ == len(AZ)):
                    iAZ = 0
        return cluster
    except Exception as e:
        logError(e)
        return None
def ec2run_completers(self, event):
    """Tab-completion callback for the ec2run command.

    Looks at the option token immediately before the word being completed
    and returns the candidate completions for that option's value; when the
    previous token is not a recognized option, returns the options not yet
    used on the line plus the known AMI names.
    """
    cmd_param = event.line.split()
    if event.line.endswith(' '):
        cmd_param.append('')
    # Discard the (possibly empty) word under the cursor, then take the
    # token before it -- that token decides which completions apply.
    arg = cmd_param.pop()
    arg = cmd_param.pop()
    if arg in ('-t', '--instance-type'):
        return [
            'm1.small', 'm1.large', 'm1.xlarge',
            'c1.medium', 'c1.xlarge',
            'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge',
            'cc1.4xlarge',
            't1.micro'
        ]
    elif arg in ('-k', '--keys'):
        return [k.name for k in ec2.get_all_key_pairs()]
    elif arg in ('-n', '--instance-count'):
        return ['1', '1-']  # just examples really
    elif arg in ('-g', '--group'):
        return [g.name for g in ec2.get_all_security_groups()]
    elif arg in ('-d', '--user-data'):
        return []
    elif arg in ('-f', '--user-data-file'):
        return []  # TODO hook normal file complete
    elif arg in ('-z', '--availability-zone'):
        return [z.name for z in ec2.get_all_zones()]
    # Fixed: the single-option cases below were written as `arg in ('--x')`.
    # A one-element parenthesized string is NOT a tuple, so `in` performed a
    # SUBSTRING test (e.g. 'net' in '--subnet' is True).  Trailing commas
    # make them real tuples.
    elif arg in ('--instance-initiated-shutdown-behavior',):
        return ['stop', 'terminate']
    elif arg in ('--placement-group',):
        return [g.name for g in ec2.get_all_placement_groups()]
    elif arg in ('--private-ip-address',):
        return []
    elif arg in ('--kernel',):
        return []  # TODO
    elif arg in ('--ramdisk',):
        return []  # TODO
    elif arg in ('--subnet',):
        return []  # TODO
    else:
        params = ec2run_parameters[:]
        # drop from params any already used
        for c in cmd_param:
            o = ec2run_parser.get_option(c)
            if o:
                for v in o._short_opts + o._long_opts:
                    if v in params:
                        params.remove(v)
        return params + ami.keys()
def ec2run_completers(self, event):
    """Tab-completion callback for the ec2run command.

    Returns value completions for the option token preceding the cursor,
    or the not-yet-used option names plus known AMI names otherwise.
    """
    cmd_param = event.line.split()
    if event.line.endswith(' '):
        cmd_param.append('')
    # Drop the word being completed; the preceding token selects the branch.
    arg = cmd_param.pop()
    arg = cmd_param.pop()
    if arg in ('-t', '--instance-type'):
        return ['m1.small', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge',
                'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'cc1.4xlarge',
                't1.micro']
    elif arg in ('-k', '--keys'):
        return [k.name for k in ec2.get_all_key_pairs()]
    elif arg in ('-n', '--instance-count'):
        return ['1', '1-']  # just examples really
    elif arg in ('-g', '--group'):
        return [g.name for g in ec2.get_all_security_groups()]
    elif arg in ('-d', '--user-data'):
        return []
    elif arg in ('-f', '--user-data-file'):
        return []  # TODO hook normal file complete
    elif arg in ('-z', '--availability-zone'):
        return [z.name for z in ec2.get_all_zones()]
    # Fixed: `('--x')` is a plain string, not a tuple, so `arg in ('--x')`
    # was a substring test; the trailing commas below make real tuples.
    elif arg in ('--instance-initiated-shutdown-behavior',):
        return ['stop', 'terminate']
    elif arg in ('--placement-group',):
        return [g.name for g in ec2.get_all_placement_groups()]
    elif arg in ('--private-ip-address',):
        return []
    elif arg in ('--kernel',):
        return []  # TODO
    elif arg in ('--ramdisk',):
        return []  # TODO
    elif arg in ('--subnet',):
        return []  # TODO
    else:
        params = ec2run_parameters[:]
        # drop from params any already used
        for c in cmd_param:
            o = ec2run_parser.get_option(c)
            if o:
                for v in o._short_opts + o._long_opts:
                    if v in params:
                        params.remove(v)
        return params + ami.keys()
def validate_provider_data(self, serializer_attrs, all_data):
    """Validate AWS cloud-account data beyond the base serializer checks.

    Runs, in order: duplicate-account detection (same access key in the
    same region), EC2 credential verification, keypair existence, optional
    Route53 domain lookup, and optional VPC id lookup.  Raises
    ValidationError as soon as a stage accumulates errors (duplicates and
    bad credentials abort early; later checks are batched together).

    Returns the validated serializer attrs.
    """
    attrs = super(AWSCloudProvider, self).validate_provider_data(serializer_attrs, all_data)

    region = attrs[self.REGION].slug
    access_key = all_data[self.ACCESS_KEY]
    secret_key = all_data[self.SECRET_KEY]
    keypair = all_data[self.KEYPAIR]

    errors = {}

    # Local import -- presumably avoids a circular import at module load
    # time (TODO confirm).
    from stackdio.api.cloud.models import CloudAccount

    # Check for duplicates
    accounts = CloudAccount.objects.filter(provider__name=self.SHORT_NAME)
    for account in accounts:
        account_yaml = yaml.safe_load(account.yaml)
        if account.region.slug == region and account_yaml[account.slug]['id'] == access_key:
            err_msg = ('You may not have multiple cloud accounts with the same access key '
                       'in the same region. Please generate a new access key if you would '
                       'like to have 2 cloud accounts in the same AWS account.')
            errors.setdefault(self.REGION, []).append(err_msg)

    if errors:
        raise ValidationError(errors)

    # check authentication credentials
    ec2 = None
    try:
        ec2 = boto.ec2.connect_to_region(
            region,
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key,
        )
        # Cheap read-only call used purely to exercise the credentials.
        ec2.get_all_zones()
    except boto.exception.EC2ResponseError:
        err_msg = 'Unable to authenticate to AWS with the provided keys.'
        errors.setdefault(self.ACCESS_KEY, []).append(err_msg)
        errors.setdefault(self.SECRET_KEY, []).append(err_msg)

    # Abort before the keypair check -- ec2 may be unusable here.
    if errors:
        raise ValidationError(errors)

    # check keypair
    try:
        ec2.get_all_key_pairs(keypair)
    except boto.exception.EC2ResponseError:
        errors.setdefault(self.KEYPAIR, []).append(
            'The keypair \'{0}\' does not exist in this account.'.format(keypair)
        )

    # check route 53 domain
    domain = all_data[self.ROUTE53_DOMAIN]
    if domain:
        try:
            # connect to route53 and check that the domain is available
            r53 = boto.connect_route53(access_key, secret_key)
            found_domain = False
            hosted_zones = r53.get_all_hosted_zones()
            hosted_zones = hosted_zones['ListHostedZonesResponse']['HostedZones']
            for hosted_zone in hosted_zones:
                # Prefix match: hosted-zone names carry a trailing dot.
                if hosted_zone['Name'].startswith(domain):
                    found_domain = True
                    break
            if not found_domain:
                err = 'The Route53 domain \'{0}\' does not exist in ' \
                      'this account.'.format(domain)
                errors.setdefault(self.ROUTE53_DOMAIN, []).append(err)
        # except boto.exception.DNSServerError as e:
        except Exception as e:
            # Deliberately broad: surface any Route53 failure to the user.
            logger.exception('Route53 issue?')
            errors.setdefault(self.ROUTE53_DOMAIN, []).append(str(e))

    # check VPC required fields
    vpc_id = attrs[self.VPC_ID]
    if vpc_id:
        vpc = None
        try:
            vpc = boto.vpc.connect_to_region(
                region,
                aws_access_key_id=access_key,
                aws_secret_access_key=secret_key,
            )
        except boto.exception.EC2ResponseError:
            err_msg = ('Unable to authenticate to AWS VPC with the '
                       'provided keys.')
            errors.setdefault(self.ACCESS_KEY, []).append(err_msg)
            errors.setdefault(self.SECRET_KEY, []).append(err_msg)

        # Only query the VPC if nothing has failed so far (vpc may be None).
        if not errors:
            try:
                vpc.get_all_vpcs([vpc_id])
            except boto.exception.EC2ResponseError:
                errors.setdefault(self.VPC_ID, []).append(
                    'The VPC \'{0}\' does not exist in this account.'
                    .format(vpc_id)
                )

    if errors:
        raise ValidationError(errors)

    return attrs
def createClusterDC(datacenter, region, zone, nodes, node_type,
                    nb_seeds_per_dc, cluster_name, opscenter):
    """Launch the EC2 instances for one Cassandra cluster datacenter.

    Creates `nodes` instances in `region`/`zone`, spreading them
    round-robin across the currently available AZs (or all in one AZ when
    only one is available).  The first node optionally hosts OpsCenter.

    Returns the cluster descriptor dict (including an 'instances' list of
    launched instances), or None on any failure.
    """
    cluster = {
        'cluster': cluster_name,
        'datacenter': datacenter,
        'region': region,
        'zone': zone,
        'nodes': nodes,
        'node_type': node_type,
        'nb_seeds_per_dc': nb_seeds_per_dc,
        'instances': [],
        'opscenter': opscenter
    }
    try:
        # Connection
        ec2 = boto.ec2.connect_to_region(region + '-' + zone)
        # Collect the availability zones currently usable in this region.
        AZlist = ec2.get_all_zones()
        AZ = []
        for AZitem in AZlist:
            if AZitem.state == 'available':
                AZ.append(AZitem.name)
        if (len(AZ) == 0):
            # No available zone
            print "No available AZ !"
            return None
        # Look up the AMI, key name and security group for this region/zone.
        # NOTE: Python 2 filter() returns a list, so [0] is valid; raises
        # IndexError if conf_HVM has no matching entry.
        info = filter(
            lambda x: ((x['region'] == region) and (x['zone'] == zone)),
            conf_HVM)[0]
        # Only one AZ available ?
        if (len(AZ) == 1):
            # we create all instances in the same AZ
            createInstance(
                ec2, info['ami'], nodes, AZ[0], node_type, info['key'],
                info['sg'], '--clustername ' + cluster_name +
                ' --totalnodes ' + str(nodes) + ' --version community')
        else:
            # we cycle Availability Zones
            iAZ = 0
            for i in range(nodes):
                if ((i == 0) and (opscenter == True)):
                    # Install OpsCenter on this first, master node.
                    instance = createInstance(
                        ec2, info['ami'], 1, AZ[iAZ], node_type, info['key'],
                        info['sg'], '--clustername ' + cluster_name +
                        ' --totalnodes 1 --version community')
                else:
                    # Node without opscenter
                    instance = createInstance(
                        ec2, info['ami'], 1, AZ[iAZ], node_type, info['key'],
                        info['sg'], '--clustername ' + cluster_name +
                        ' --opscenter no --totalnodes 1 --version community')
                # Console display name: "[DC]  M  name" for the master,
                # "[DC]  <n>  name" for the others.
                node_name = "[" + datacenter + "] "
                if i == 0:
                    node_name += ' M '
                else:
                    node_name += ' ' + str(i + 1) + ' '
                node_name += cluster_name
                # Brief pause so the instance exists before tagging it.
                time.sleep(5)
                instance.add_tag('Name', node_name)
                instance.add_tag('Cluster', cluster_name)
                cluster['instances'].append({
                    'instance': instance,
                    'index': str(i + 1),
                    'creation': datetime.datetime.now(),
                    'region': region,
                    'zone': zone,
                    'datacenter': datacenter,
                    'AZ': AZ[iAZ]
                })
                # Advance round-robin over the available AZs.
                iAZ += 1
                if (iAZ == len(AZ)):
                    iAZ = 0
        return cluster
    except Exception as e:
        logError(e)
        return None
def validate_provider_data(self, serializer_attrs, all_data):
    """Validate AWS cloud-account data beyond the base serializer checks.

    Runs, in order: duplicate-account detection (same access key in the
    same region), EC2 credential verification, keypair existence, optional
    Route53 domain lookup, and optional VPC id lookup.  Raises
    ValidationError as soon as a stage accumulates errors (duplicates and
    bad credentials abort early; later checks are batched together).

    Returns the validated serializer attrs.
    """
    attrs = super(AWSCloudProvider, self).validate_provider_data(
        serializer_attrs, all_data)

    region = attrs[self.REGION].slug
    access_key = all_data[self.ACCESS_KEY]
    secret_key = all_data[self.SECRET_KEY]
    keypair = all_data[self.KEYPAIR]

    errors = {}

    # Local import -- presumably avoids a circular import at module load
    # time (TODO confirm).
    from stackdio.api.cloud.models import CloudAccount

    # Check for duplicates
    accounts = CloudAccount.objects.filter(provider__name=self.SHORT_NAME)
    for account in accounts:
        account_yaml = yaml.safe_load(account.yaml)
        if account.region.slug == region and account_yaml[
                account.slug]['id'] == access_key:
            err_msg = (
                'You may not have multiple cloud accounts with the same access key '
                'in the same region. Please generate a new access key if you would '
                'like to have 2 cloud accounts in the same AWS account.')
            errors.setdefault(self.REGION, []).append(err_msg)

    if errors:
        raise ValidationError(errors)

    # check authentication credentials
    ec2 = None
    try:
        ec2 = boto.ec2.connect_to_region(
            region,
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key,
        )
        # Cheap read-only call used purely to exercise the credentials.
        ec2.get_all_zones()
    except boto.exception.EC2ResponseError:
        err_msg = 'Unable to authenticate to AWS with the provided keys.'
        errors.setdefault(self.ACCESS_KEY, []).append(err_msg)
        errors.setdefault(self.SECRET_KEY, []).append(err_msg)

    # Abort before the keypair check -- ec2 may be unusable here.
    if errors:
        raise ValidationError(errors)

    # check keypair
    try:
        ec2.get_all_key_pairs(keypair)
    except boto.exception.EC2ResponseError:
        errors.setdefault(self.KEYPAIR, []).append(
            'The keypair \'{0}\' does not exist in this account.'.format(
                keypair))

    # check route 53 domain
    domain = all_data[self.ROUTE53_DOMAIN]
    if domain:
        try:
            # connect to route53 and check that the domain is available
            r53 = boto.connect_route53(access_key, secret_key)
            found_domain = False
            hosted_zones = r53.get_all_hosted_zones()
            hosted_zones = hosted_zones['ListHostedZonesResponse'][
                'HostedZones']
            for hosted_zone in hosted_zones:
                # Prefix match: hosted-zone names carry a trailing dot.
                if hosted_zone['Name'].startswith(domain):
                    found_domain = True
                    break
            if not found_domain:
                err = 'The Route53 domain \'{0}\' does not exist in ' \
                      'this account.'.format(domain)
                errors.setdefault(self.ROUTE53_DOMAIN, []).append(err)
        # except boto.exception.DNSServerError as e:
        except Exception as e:
            # Deliberately broad: surface any Route53 failure to the user.
            logger.exception('Route53 issue?')
            errors.setdefault(self.ROUTE53_DOMAIN, []).append(str(e))

    # check VPC required fields
    vpc_id = attrs[self.VPC_ID]
    if vpc_id:
        vpc = None
        try:
            vpc = boto.vpc.connect_to_region(
                region,
                aws_access_key_id=access_key,
                aws_secret_access_key=secret_key,
            )
        except boto.exception.EC2ResponseError:
            err_msg = ('Unable to authenticate to AWS VPC with the '
                       'provided keys.')
            errors.setdefault(self.ACCESS_KEY, []).append(err_msg)
            errors.setdefault(self.SECRET_KEY, []).append(err_msg)

        # Only query the VPC if nothing has failed so far (vpc may be None).
        if not errors:
            try:
                vpc.get_all_vpcs([vpc_id])
            except boto.exception.EC2ResponseError:
                errors.setdefault(self.VPC_ID, []).append(
                    'The VPC \'{0}\' does not exist in this account.'.
                    format(vpc_id))

    if errors:
        raise ValidationError(errors)

    return attrs
# Tag key under which each instance's role is recorded.
TAG_ROLE_KEY = 'ROLE'

INSTANCES = {
    # key on Name that we set as tag and present in Console
    'science-left-app' : {'az': AZ_LEFT, 'type': INSTANCE_4GB, 'role': 'app'},
    'science-left-adm' : {'az': AZ_LEFT, 'type': INSTANCE_4GB, 'role': 'adm'},
    'science-left-db' : {'az': AZ_LEFT, 'type': INSTANCE_8GB, 'role': 'db' },
    'science-right-app' : {'az': AZ_RIGHT, 'type': INSTANCE_4GB, 'role': 'app'},
    'science-right-adm' : {'az': AZ_RIGHT, 'type': INSTANCE_4GB, 'role': 'adm'},
    # NOTE(review): this key has a trailing space -- confirm it is intended.
    'science-right-db ' : {'az': AZ_RIGHT, 'type': INSTANCE_8GB, 'role': 'db' },
}

logging.basicConfig(level=logging.INFO)

# Connect to the configured region and log the zones it exposes.
ec2 = boto.ec2.connect_to_region(REGION)
zones = ec2.get_all_zones()
logging.info('Zones: %s' % zones)

# should look for a science pair and if none, create and save it.
# boto.ec2.securitygroup...
key_pair = ec2.get_key_pair(KEY_NAME)
logging.info('Key pair: %s' % key_pair)
#key_pair = ec2.create_key_pair(KEY_NAME)
#key_pair.save(os.path.expanduser(os.path.join('~', '.ssh')))

# Launch one instance per entry; the instance name is smuggled in via
# user_data (see the inline question below).
for name, settings in INSTANCES.items():
    reservation = ec2.run_instances(instance_type=settings['type'],
                                    placement=settings['az'],
                                    user_data='NAME=%s' % name,  # How best to use?
                                    image_id=IMAGE_ID,
                                    # NOTE(review): this run_instances(...) call is
                                    # truncated in this chunk -- the remaining
                                    # arguments/close paren are outside this view.
def deploy_static(app_name, env_name, domain, force):
    """Build and deploy a static website for `app_name` to an S3 bucket.

    Skips the deploy when the git version already in the bucket matches
    (unless `force`).  Uploads changed/new files, tags the bucket with the
    git version via a `__VERSION__` key, installs configured redirects, and
    -- when `domain` is given -- ensures a Route53 ALIAS A-record pointing
    the domain at the S3 website endpoint.
    """
    app = App(env_name, app_name)
    # Bucket name defaults to "<system_name>-<repo>" when no domain given.
    bucket_name = domain or '{}-{}'.format(config.get('system_name', uuid.uuid1().hex), app.repo.name)
    app.repo.fetch()
    version = app.repo.head_commit_id()
    s3 = boto.connect_s3()
    b = s3.lookup(bucket_name)
    if b is not None:
        # The bucket stores the deployed git version as metadata on a
        # sentinel key.
        version_key = b.get_key('__VERSION__')
        if version_key is not None:
            current_version = version_key.get_metadata('git-version')
            if version == current_version:
                if force:
                    print '-----> Version {} already deployed, but re-deploying anyway'.format(version)
                else:
                    print '-----> Version {} already deployed!'.format(version)
                    return
    with lcd(app.repo.path):
        build_cmd = app.config.get('build_script')
        if build_cmd:
            print '-----> Building'
            local(build_cmd)
    if b is None:
        print '-----> Creating bucket {}'.format(bucket_name)
        b = s3.create_bucket(bucket_name)
    # TODO: this policy allows all users read access to all objects.
    # Need to find a way to limit access to __VERSION__ to only authenticated
    # users.
    public_access_policy = json.dumps({"Version":"2012-10-17",
        "Statement":[{"Sid":"PublicReadForGetBucketObjects",
            "Effect":"Allow",
            "Principal": "*",
            "Action":["s3:GetObject"],
            "Resource":["arn:aws:s3:::{}/*".format(bucket_name)]}]})
    b.set_policy(public_access_policy)
    #b.configure_versioning(versioning=False)
    b.configure_website(suffix="index.html", error_key="error.html")

    # Fold all existing bucket keys (except the version sentinel) into a
    # name -> key mapping so we can diff against the local tree.
    def map_key_to_obj(m, obj):
        if obj.key != '__VERSION__':
            m[obj.key] = obj
        return m
    existing_keys = reduce(map_key_to_obj, b.get_all_keys(), {})
    root = normpath(join(app.repo.path, app.config.get('root_dir', '')))
    app_redirects = app.config.get('redirects', {})
    # Redirect keys are managed separately below; don't treat them as stale.
    for key_name in app_redirects.keys():
        existing_keys.pop(key_name, None)
    print '-----> Uploading {} to {} bucket'.format(root, bucket_name)
    new_keys = []
    updated_keys = []
    for dirname, dirnames, filenames in walk(root):
        reldirname = relpath(dirname, root)
        reldirname = '' if reldirname == '.' else reldirname
        # Skip anything under .git.
        if os.path.commonprefix(['.git', reldirname]) == '.git':
            continue
        for filename in filenames:
            full_filename = join(reldirname, filename)
            if full_filename == '.s3':
                continue
            new_or_update = ' '
            # NOTE: dict.has_key / print statements -- this is Python 2 code.
            if existing_keys.has_key(full_filename):
                new_or_update = '[UPDATE]'
                updated_keys.append(full_filename)
                key = existing_keys.pop(full_filename)
            else:
                new_or_update = '[NEW] '
                new_keys.append(full_filename)
                key = b.new_key(full_filename)
            print ' {} Uploading {}'.format(new_or_update, full_filename)
            key.set_contents_from_filename(join(dirname, filename))
    # Whatever remains in existing_keys is in the bucket but not on disk.
    if len(existing_keys) > 0:
        print '-----> WARNING: the following files are still present but no'
        print ' longer part of the website:'
        for k,v in existing_keys.iteritems():
            print ' {}'.format(k)
    print '-----> Tagging bucket with git version {}'.format(version)
    # Recreate the sentinel key so its metadata reflects the new version.
    version_key = b.get_key('__VERSION__')
    if version_key:
        version_key.delete()
    version_key = b.new_key('__VERSION__')
    version_key.set_metadata('git-version', version)
    version_key.set_contents_from_string('')
    print '-----> Setting up redirects'
    app_redirects = app.config.get('redirects', {})
    if len(app_redirects) == 0:
        print ' No redirects.'
    else:
        # Replace any existing key so the redirect is set cleanly.
        def get_or_new_key(bucket, name):
            key = bucket.get_key(name)
            if key is not None:
                key.delete()
            return bucket.new_key(name)
        elb = boto.connect_elb()
        pybars_compiler = pybars.Compiler()
        # Redirect targets are handlebars templates rendered with the
        # environment's web-ui ELB DNS name.
        for key_name, redirect_source in app_redirects.iteritems():
            redirect_template = pybars_compiler.compile(redirect_source)
            app_redirects[key_name] = redirect_template
        data = {
            'webui_dns': elb.get_all_load_balancers(load_balancer_names=['{}-web-ui'.format(env_name)])[0].dns_name
        }
        for key_name, redirect_template in app_redirects.iteritems():
            k = get_or_new_key(b, key_name)
            redirect = unicode(redirect_template(data))
            print ' Redirect {} to {}'.format(key_name, redirect)
            k.set_redirect(redirect)
    print '=====> Deployed to {}!'.format(b.get_website_endpoint())
    if domain is not None:
        # TODO: support redirection from www.<domain>
        # b_www = 'www.{}'.format(bucket_name)
        ec2 = boto.connect_ec2()
        # Map the configured AZ to its region to find the matching S3
        # website endpoint/hosted-zone pair.
        region_name = first([z.region.name for z in ec2.get_all_zones() if z.name == config['availability_zone']])
        s3_website_region = s3_website_regions[region_name]
        route53 = boto.connect_route53()
        zone_name = "{}.".format(get_tld("http://{}".format(domain)))
        zone = route53.get_zone(zone_name)
        if zone is None:
            raise Exception("Cannot find zone {}".format(zone_name))
        full_domain = "{}.".format(domain)
        a_record = zone.get_a(full_domain)
        if not a_record:
            print '-----> Creating ALIAS for {} to S3'.format(full_domain)
            changes = ResourceRecordSets(route53, zone.id)
            change_a = changes.add_change('CREATE', full_domain, 'A')
            change_a.set_alias(alias_hosted_zone_id=s3_website_region[1], alias_dns_name=s3_website_region[0])
            #change_cname = records.add_change('CREATE', 'www.' + full_domain, 'CNAME')
            #change_cname.add_value(b_www.get_website_endpoint())
            changes.commit()
        else:
            # Record exists: report mismatches but do not modify it.
            print '-----> ALIAS for {} to S3 already exists'.format(full_domain)
            print ' {}'.format(a_record)
            if a_record.alias_dns_name != s3_website_region[0]:
                print ' WARNING: Alias DNS name is {}, but should be {}'.format(a_record.alias_dns_name, s3_website_region[0])
            if a_record.alias_hosted_zone_id != s3_website_region[1]:
                print ' WARNING: Alias hosted zone ID is {}, but should be {}'.format(a_record.alias_hosted_zone_id, s3_website_region[1])
            if a_record.name != full_domain:
                print ' WARNING: Domain is {}, but should be {}'.format(a_record.name, full_domain)
            if a_record.type != 'A':
                print ' WARNING: Record type is {}, but should be {}'.format(a_record.type, 'A')
    print '=====> DONE!'
def deploy_static(app_name, env_name, domain, force):
    """Build and deploy a static website for `app_name` to an S3 bucket.

    Skips the deploy when the git version already in the bucket matches
    (unless `force`).  Uploads changed/new files, tags the bucket with the
    git version via a `__VERSION__` key, installs configured redirects, and
    -- when `domain` is given -- ensures a Route53 ALIAS A-record pointing
    the domain at the S3 website endpoint.
    """
    app = App(env_name, app_name)
    # Bucket name defaults to "<system_name>-<repo>" when no domain given.
    bucket_name = domain or '{}-{}'.format(
        config.get('system_name', uuid.uuid1().hex), app.repo.name)
    app.repo.fetch()
    version = app.repo.head_commit_id()
    s3 = boto.connect_s3()
    b = s3.lookup(bucket_name)
    if b is not None:
        # The bucket stores the deployed git version as metadata on a
        # sentinel key.
        version_key = b.get_key('__VERSION__')
        if version_key is not None:
            current_version = version_key.get_metadata('git-version')
            if version == current_version:
                if force:
                    print '-----> Version {} already deployed, but re-deploying anyway'.format(
                        version)
                else:
                    print '-----> Version {} already deployed!'.format(version)
                    return
    with lcd(app.repo.path):
        build_cmd = app.config.get('build_script')
        if build_cmd:
            print '-----> Building'
            local(build_cmd)
    if b is None:
        print '-----> Creating bucket {}'.format(bucket_name)
        b = s3.create_bucket(bucket_name)
    # TODO: this policy allows all users read access to all objects.
    # Need to find a way to limit access to __VERSION__ to only authenticated
    # users.
    public_access_policy = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Sid": "PublicReadForGetBucketObjects",
            "Effect": "Allow",
            "Principal": "*",
            "Action": ["s3:GetObject"],
            "Resource": ["arn:aws:s3:::{}/*".format(bucket_name)]
        }]
    })
    b.set_policy(public_access_policy)
    #b.configure_versioning(versioning=False)
    b.configure_website(suffix="index.html", error_key="error.html")

    # Fold all existing bucket keys (except the version sentinel) into a
    # name -> key mapping so we can diff against the local tree.
    def map_key_to_obj(m, obj):
        if obj.key != '__VERSION__':
            m[obj.key] = obj
        return m
    existing_keys = reduce(map_key_to_obj, b.get_all_keys(), {})
    root = normpath(join(app.repo.path, app.config.get('root_dir', '')))
    app_redirects = app.config.get('redirects', {})
    # Redirect keys are managed separately below; don't treat them as stale.
    for key_name in app_redirects.keys():
        existing_keys.pop(key_name, None)
    print '-----> Uploading {} to {} bucket'.format(root, bucket_name)
    new_keys = []
    updated_keys = []
    for dirname, dirnames, filenames in walk(root):
        reldirname = relpath(dirname, root)
        reldirname = '' if reldirname == '.' else reldirname
        # Skip anything under .git.
        if os.path.commonprefix(['.git', reldirname]) == '.git':
            continue
        for filename in filenames:
            full_filename = join(reldirname, filename)
            if full_filename == '.s3':
                continue
            new_or_update = ' '
            # NOTE: dict.has_key / print statements -- this is Python 2 code.
            if existing_keys.has_key(full_filename):
                new_or_update = '[UPDATE]'
                updated_keys.append(full_filename)
                key = existing_keys.pop(full_filename)
            else:
                new_or_update = '[NEW] '
                new_keys.append(full_filename)
                key = b.new_key(full_filename)
            print ' {} Uploading {}'.format(new_or_update, full_filename)
            key.set_contents_from_filename(join(dirname, filename))
    # Whatever remains in existing_keys is in the bucket but not on disk.
    if len(existing_keys) > 0:
        print '-----> WARNING: the following files are still present but no'
        print ' longer part of the website:'
        for k, v in existing_keys.iteritems():
            print ' {}'.format(k)
    print '-----> Tagging bucket with git version {}'.format(version)
    # Recreate the sentinel key so its metadata reflects the new version.
    version_key = b.get_key('__VERSION__')
    if version_key:
        version_key.delete()
    version_key = b.new_key('__VERSION__')
    version_key.set_metadata('git-version', version)
    version_key.set_contents_from_string('')
    print '-----> Setting up redirects'
    app_redirects = app.config.get('redirects', {})
    if len(app_redirects) == 0:
        print ' No redirects.'
    else:
        # Replace any existing key so the redirect is set cleanly.
        def get_or_new_key(bucket, name):
            key = bucket.get_key(name)
            if key is not None:
                key.delete()
            return bucket.new_key(name)
        elb = boto.connect_elb()
        pybars_compiler = pybars.Compiler()
        # Redirect targets are handlebars templates rendered with the
        # environment's web-ui ELB DNS name.
        for key_name, redirect_source in app_redirects.iteritems():
            redirect_template = pybars_compiler.compile(redirect_source)
            app_redirects[key_name] = redirect_template
        data = {
            'webui_dns':
            elb.get_all_load_balancers(
                load_balancer_names=['{}-web-ui'.format(env_name)])[0].dns_name
        }
        for key_name, redirect_template in app_redirects.iteritems():
            k = get_or_new_key(b, key_name)
            redirect = unicode(redirect_template(data))
            print ' Redirect {} to {}'.format(key_name, redirect)
            k.set_redirect(redirect)
    print '=====> Deployed to {}!'.format(b.get_website_endpoint())
    if domain is not None:
        # TODO: support redirection from www.<domain>
        # b_www = 'www.{}'.format(bucket_name)
        ec2 = boto.connect_ec2()
        # Map the configured AZ to its region to find the matching S3
        # website endpoint/hosted-zone pair.
        region_name = first([
            z.region.name for z in ec2.get_all_zones()
            if z.name == config['availability_zone']
        ])
        s3_website_region = s3_website_regions[region_name]
        route53 = boto.connect_route53()
        zone_name = "{}.".format(get_tld("http://{}".format(domain)))
        zone = route53.get_zone(zone_name)
        if zone is None:
            raise Exception("Cannot find zone {}".format(zone_name))
        full_domain = "{}.".format(domain)
        a_record = zone.get_a(full_domain)
        if not a_record:
            print '-----> Creating ALIAS for {} to S3'.format(full_domain)
            changes = ResourceRecordSets(route53, zone.id)
            change_a = changes.add_change('CREATE', full_domain, 'A')
            change_a.set_alias(alias_hosted_zone_id=s3_website_region[1],
                               alias_dns_name=s3_website_region[0])
            #change_cname = records.add_change('CREATE', 'www.' + full_domain, 'CNAME')
            #change_cname.add_value(b_www.get_website_endpoint())
            changes.commit()
        else:
            # Record exists: report mismatches but do not modify it.
            print '-----> ALIAS for {} to S3 already exists'.format(
                full_domain)
            print ' {}'.format(a_record)
            if a_record.alias_dns_name != s3_website_region[0]:
                print ' WARNING: Alias DNS name is {}, but should be {}'.format(
                    a_record.alias_dns_name, s3_website_region[0])
            if a_record.alias_hosted_zone_id != s3_website_region[1]:
                print ' WARNING: Alias hosted zone ID is {}, but should be {}'.format(
                    a_record.alias_hosted_zone_id, s3_website_region[1])
            if a_record.name != full_domain:
                print ' WARNING: Domain is {}, but should be {}'.format(
                    a_record.name, full_domain)
            if a_record.type != 'A':
                print ' WARNING: Record type is {}, but should be {}'.format(
                    a_record.type, 'A')
    print '=====> DONE!'
def get_availability_zones():
    """Return all availability zones visible in the configured AWS region."""
    region_name = Config().get("region")
    connection = boto.ec2.connect_to_region(region_name)
    return connection.get_all_zones()