def resolve_security_groups(self):
    """Ensure every group in self.security_groups exists, creating missing ones.

    Existence is checked only within this server's VPC (self.vpc_id).
    Missing groups are created via EC2 when no subnet is configured, or
    via a VPCConnection (with the VPC id) when deploying into a VPC.
    """
    # Removed: an unused `filters = {}` local and the unused enumerate() index.
    self.log.info("Resolving security groups")

    def _exists(name):
        # Re-query each time so groups created earlier in the loop are seen.
        return name in [sg.name
                        for sg in self.ec2.get_all_security_groups()
                        if self.vpc_id == sg.vpc_id]

    for group in self.security_groups:
        if not _exists(group):
            self.log.info('Security Group {group} does not exist'.format(
                group=group))
            if self.subnet_id is None:
                self.ec2.create_security_group(group, group)
            else:
                vpc_conn = VPCConnection()
                vpc_conn.create_security_group(
                    group, group, vpc_id=self.vpc_id)
            self.log.info('Created security group {group}'.format(
                group=group))
        else:
            self.log.info('Security Group {group} already exists'.format(
                group=group))
def delete_vpc(region, name): conn = VPCConnection(region=region) vpc = get_vpc_by_name(conn, name) if not vpc: print "VPC %s does not exist. Nothing to delete" % name else: delete_sgs_for_vpc(conn, vpc, name) delete_route_tables_for_vpc(conn, vpc, name) delete_igs_for_vpc(conn, vpc, name) delete_subnets_for_vpc(conn, vpc, name) print "Deleting VPC %s..." % name success = False for i in range(0, 100): try: conn.delete_vpc(vpc.id) success = True break except EC2ResponseError as ex: if ex.error_code == "DependencyViolation": print "WARN(%s): Sleeping 2 and retrying" % ex.error_code time.sleep(2) continue else: raise if not success: raise Exception("Could not delete vpc")
def client(self, aws_config=None):
    """Represents the VPCConnection Client

    Builds a VPCConnection from an explicit config dict (if given) or a
    config file. When 'ec2_region_name' is set, it is resolved to a boto
    region object and an optional 'ec2_region_endpoint' override is applied.
    """
    aws_config_property = (self._get_aws_config_property(aws_config) or
                           self._get_aws_config_from_file())
    if not aws_config_property:
        # No configuration at all: rely on boto's environment defaults.
        return VPCConnection()
    elif aws_config_property.get('ec2_region_name'):
        region_object = \
            get_region(aws_config_property['ec2_region_name'])
        aws_config = aws_config_property.copy()
        if region_object and 'ec2_region_endpoint' in aws_config_property:
            # Custom endpoint overrides the region's default endpoint.
            region_object.endpoint = \
                aws_config_property['ec2_region_endpoint']
        aws_config['region'] = region_object
    else:
        aws_config = aws_config_property.copy()
    # NOTE(review): the two deletions below appear to apply after both
    # branches — VPCConnection does not accept these keys — TODO confirm
    # original nesting against upstream.
    if 'ec2_region_name' in aws_config:
        del (aws_config['ec2_region_name'])
    # for backward compatibility,
    # delete this key before passing config to Boto
    if 'ec2_region_endpoint' in aws_config:
        del (aws_config["ec2_region_endpoint"])
    return VPCConnection(**aws_config)
def run(self, terms, variables=None, **kwargs):
    """Ansible lookup: return a VPC attribute for VPCs matching tag filters.

    Each term has the form "<attribute> [tag=value ...]"; the attribute
    must be one of the keys in valid_lookups.
    """
    ret = []
    # Fixed: the original dict literal was missing commas between entries
    # (a syntax error).
    valid_lookups = {
        'id': None,
        'dhcp_options_id': None,
        'state': None,
        'cidr_block': None,
        'is_default': None,
        'instance_tenancy': None,
        'classic_link_enabled': None,
    }
    conn = VPCConnection()
    for term in terms:
        params = term.split(' ')
        key_to_lookup = params[0]
        # Validate with an explicit raise: assert statements are stripped
        # under python -O and should not guard user input.
        if key_to_lookup not in valid_lookups:
            raise AnsibleError(
                'invalid lookup key: {0}'.format(key_to_lookup))
        vpc_filter = {}
        for param in params[1:]:
            tag, value = param.split('=')
            vpc_filter.update({'tag:' + tag: value})
        vpcs = conn.get_all_vpcs(None, vpc_filter)
        # Fixed off-by-one: `len(vpcs) > 1` silently dropped results when
        # exactly one VPC matched.
        if vpcs:
            ret = [x.get(key_to_lookup) for x in vpcs]
    return ret
class DiscoEIP(object):
    """Manage allocation, listing and release of VPC Elastic IPs."""

    def __init__(self):
        self.vpc_conn = VPCConnection()

    def list(self):
        """Return every EIP currently allocated to this account."""
        return self.vpc_conn.get_all_addresses()

    def allocate(self):
        """Allocate and return a fresh VPC-domain EIP."""
        return self.vpc_conn.allocate_address(domain='vpc')

    def release(self, eip_address, force=False):
        """Release an EIP.

        An address still associated with a machine is left untouched and
        False is returned, unless force is True, in which case it is
        disassociated first and then released.
        """
        eip = self.vpc_conn.get_all_addresses([eip_address])[0]
        if eip.association_id:
            if not force:
                return False
            eip.disassociate()
        return eip.release()
def resolve_security_groups(self):
    """Create any of self.security_groups that do not yet exist in the VPC."""
    self.log.info("Resolving security groups")

    # If the server is being spun up in a vpc, search only that vpc.
    # Re-query on every check so groups created earlier in the loop count.
    def _group_present(name):
        return name in [
            sg.name for sg in self.ec2.get_all_security_groups()
            if self.vpc_id == sg.vpc_id
        ]

    for index, group in enumerate(self.security_groups):
        if _group_present(group):
            self.log.info('Security Group {group} already exists'.format(
                group=group))
            continue
        self.log.info('Security Group {group} does not exist'.format(
            group=group))
        if self.subnet_id is None:
            # Classic EC2: name + description only.
            self.ec2.create_security_group(group, group)
        else:
            # VPC deployment: group must be created inside the VPC.
            vpc_conn = VPCConnection()
            vpc_conn.create_security_group(group, group, vpc_id=self.vpc_id)
        self.log.info(
            'Created security group {group}'.format(group=group))
def get_subnet2(name, region):
    """Return the id of the first subnet whose tag value equals `name`.

    Raises an Exception when no subnet carries the tag value.
    """
    vpc = VPCConnection(aws_access_key_id=access_key,
                        aws_secret_access_key=secret_key)
    subnets = vpc.get_all_subnets(filters={'tag-value': name})
    if not subnets:
        raise Exception("No subnet tagged with value %s" % name)
    # Fixed: the original parsed str(list_of_subnets) with split/replace,
    # which breaks when more than one subnet matches; use .id directly.
    return subnets[0].id
def setUp(self):
    """Build the VPCConnection under test with placeholder credentials."""
    super(TestVPCConnection, self).setUp()
    dummy_key = 'aws_access_key_id'
    dummy_secret = 'aws_secret_access_key'
    self.vpc_connection = VPCConnection(
        aws_access_key_id=dummy_key,
        aws_secret_access_key=dummy_secret)
class DiscoEIP(object):
    """Small helper for working with VPC Elastic IPs.

    Uses a boto2 VPCConnection for address operations and a boto3 EC2
    client for tagging.
    """

    def __init__(self):
        self.vpc_conn = VPCConnection()
        self.ec2_conn = boto3.client('ec2')

    def list(self):
        """Return all EIPs currently allocated to the account."""
        return self.vpc_conn.get_all_addresses()

    def allocate(self):
        """Allocate a new VPC-domain EIP and return it."""
        return self.vpc_conn.allocate_address(domain='vpc')

    def tag_dynamic(self, eip_allocation_id):
        """Mark an EIP allocation as dynamic via a dynamic=true tag."""
        dynamic_tag = {'Key': 'dynamic', 'Value': 'true'}
        return throttled_call(self.ec2_conn.create_tags,
                              Resources=[eip_allocation_id],
                              Tags=[dynamic_tag])

    def release(self, eip_address, force=False):
        """Release an EIP.

        Associated addresses are only released when force is True
        (disassociating first); otherwise False is returned.
        """
        eip = self.vpc_conn.get_all_addresses([eip_address])[0]
        if eip.association_id:
            if not force:
                return False
            eip.disassociate()
        return eip.release()

    def find_eip_address(self, eip):
        """Return the address object for a public EIP, or None if absent."""
        matches = self.vpc_conn.get_all_addresses(
            filters={'public-ip': eip})
        try:
            return matches[0]
        except IndexError:
            return None
def delete_peerings(vpc_id=None):
    """Delete peerings. If vpc_id is specified, delete all peerings of the VPCs only"""
    vpc_conn = VPCConnection()
    for peering in DiscoVPC.list_peerings(vpc_id):
        logging.info('deleting peering connection %s', peering.id)
        try:
            vpc_conn.delete_vpc_peering_connection(peering.id)
        except EC2ResponseError:
            # Surface a clearer error than the raw AWS response.
            raise RuntimeError(
                'Failed to delete VPC Peering connection {}'.format(
                    peering.id))
def create_subnet(vpc_id, cidr_block, availability_zone, subnet_name, region):
    """Create a subnet in the VPC, tag it, and return its id."""
    vpc_conn = VPCConnection(aws_access_key_id=access_key,
                             aws_secret_access_key=secret_key)
    new_subnet = vpc_conn.create_subnet(vpc_id=vpc_id,
                                        cidr_block=cidr_block,
                                        availability_zone=availability_zone)
    ec2_conn = boto.ec2.connect_to_region(
        region,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key)
    # Brief pause so the new subnet is visible before tagging it.
    time.sleep(1)
    subnet_tags = {"Name": subnet_name, "Project": 'LiveLiveProd'}
    ec2_conn.create_tags(new_subnet.id, subnet_tags)
    return new_subnet.id
def find_vpc_id_by_name(vpc_name):
    """Find VPC by name"""
    matches = VPCConnection().get_all_vpcs(filters={'tag:Name': vpc_name})
    # Guard clauses: exactly one match is required.
    if not matches:
        raise VPCNameNotFound("No VPC is named as {}".format(vpc_name))
    if len(matches) > 1:
        raise MultipleVPCsForVPCNameError("More than 1 VPC is named as {}".format(vpc_name))
    return matches[0].id
def create_nacl(vpc_id, region, network_aclname):
    """Create a network ACL in the VPC, tag it, and return its id."""
    vpc_conn = VPCConnection(aws_access_key_id=access_key,
                             aws_secret_access_key=secret_key)
    acl = vpc_conn.create_network_acl(vpc_id)
    # Brief pause so the new ACL is visible before tagging it.
    time.sleep(1)
    ec2_conn = boto.ec2.connect_to_region(
        region,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key)
    acl_tags = {"Name": network_aclname, "Project": 'LiveLiveProd'}
    ec2_conn.create_tags(acl.id, acl_tags)
    return acl.id
class TestVPCConnection(unittest.TestCase):
    """Test class for `boto.vpc.VPCConnection`"""

    def setUp(self):
        """Setup method to initialize vpc_connection object"""
        super(TestVPCConnection, self).setUp()
        self.vpc_connection = VPCConnection(
            aws_access_key_id="aws_access_key_id",
            aws_secret_access_key="aws_secret_access_key"
        )

    def test_detach_internet_gateway(self):
        """Tests detach_internet_gateway with all valid parameters"""
        internet_gateway_id = "mock_gateway_id"
        vpc_id = "mock_vpc_id"

        # Stub get_status to verify the action name and parameters passed.
        def get_status(status, params):
            if (
                status == "DetachInternetGateway"
                and params["InternetGatewayId"] == internet_gateway_id
                and params["VpcId"] == vpc_id
            ):
                return True
            else:
                return False

        self.vpc_connection.get_status = get_status
        status = self.vpc_connection.detach_internet_gateway(
            internet_gateway_id, vpc_id)
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(True, status)

    def test_replace_route_table_association(self):
        """Tests replace_route_table_assocation with all valid parameters"""
        association_id = "mock_association_id"
        route_table_id = "mock_route_table_id"

        def get_status(status, params):
            if (
                status == "ReplaceRouteTableAssociation"
                and params["AssociationId"] == association_id
                and params["RouteTableId"] == route_table_id
            ):
                return True
            else:
                return False

        self.vpc_connection.get_status = get_status
        # NOTE: "assocation" is boto's own (misspelled) method name.
        status = self.vpc_connection.replace_route_table_assocation(
            association_id, route_table_id)
        self.assertEqual(True, status)
class DiscoEIP(object):
    """Thin wrapper around EIP management: allocate, list, tag, release."""

    def __init__(self):
        self.vpc_conn = VPCConnection()
        self.ec2_conn = boto3.client('ec2')

    def list(self):
        """All EIPs allocated to this account."""
        return self.vpc_conn.get_all_addresses()

    def allocate(self):
        """Allocate a new EIP in the 'vpc' domain."""
        return self.vpc_conn.allocate_address(domain='vpc')

    def tag_dynamic(self, eip_allocation_id):
        """Tag the given allocation with dynamic=true (throttle-aware)."""
        return throttled_call(
            self.ec2_conn.create_tags,
            Resources=[eip_allocation_id],
            Tags=[{'Key': 'dynamic', 'Value': 'true'}])

    def release(self, eip_address, force=False):
        """Release an EIP, refusing (returning False) while it is
        associated with a machine unless force is True."""
        eip = self.vpc_conn.get_all_addresses([eip_address])[0]
        if eip.association_id and not force:
            return False
        if eip.association_id:
            eip.disassociate()
        return eip.release()

    def find_eip_address(self, eip):
        """Address object for the given public IP, or None if not found."""
        found = self.vpc_conn.get_all_addresses(
            filters={'public-ip': eip})
        return found[0] if found else None
def setUpClass(self):
    """Provision a uniquely-named test VPC and remove any stale zone."""
    suffix = str(int(time.time()))
    self.base_domain = 'boto-private-zone-test-%s.com' % suffix
    self.route53 = Route53Connection()
    self.vpc = VPCConnection()
    self.test_vpc = self.vpc.create_vpc(cidr_block='10.11.0.0/16')
    # Tag the VPC so it is easy to identify if cleanup fails.
    self.test_vpc.add_tag("Name", self.base_domain)
    self.zone = self.route53.get_zone(self.base_domain)
    if self.zone is not None:
        self.zone.delete()
def get_subnet_vpc_id(self, subnet_id):
    """Return the id of the VPC that owns the given subnet.

    Raises NoSubnetReturned when no subnet matches and a generic
    Exception when more than one is returned.
    """
    vpc_conn = VPCConnection()
    # Fixed filter key: EC2 filter names are hyphenated ('subnet-id');
    # 'subnet_id' matches nothing.
    subnets = vpc_conn.get_all_subnets(
        filters={'subnet-id': subnet_id})
    if len(subnets) == 1:
        vpc_id = subnets[0].vpc_id
        return vpc_id
    elif len(subnets) == 0:
        raise NoSubnetReturned("No subnets returned")
    else:
        raise Exception("More than 1 subnet returned")
def find_vpc_id_by_name(vpc_name):
    """Find VPC by name"""
    conn = VPCConnection()
    candidates = conn.get_all_vpcs(filters={'tag:Name': vpc_name})
    count = len(candidates)
    if count == 1:
        return candidates[0].id
    if count == 0:
        raise VPCNameNotFound("No VPC is named as {}".format(vpc_name))
    raise MultipleVPCsForVPCNameError(
        "More than 1 VPC is named as {}".format(vpc_name))
def delete_peerings(vpc_id=None):
    """Delete peerings. If vpc_id is specified, delete all peerings of the VPCs only"""
    conn = VPCConnection()
    peerings = DiscoVPC.list_peerings(vpc_id)
    for peering in peerings:
        try:
            logging.info('deleting peering connection %s', peering.id)
            conn.delete_vpc_peering_connection(peering.id)
        except EC2ResponseError:
            # Wrap the AWS error in something actionable for the caller.
            message = 'Failed to delete VPC Peering connection {}'.format(
                peering.id)
            raise RuntimeError(message)
def get_subnet_vpc_id(self, subnet_id):
    """Return the id of the VPC that owns the given subnet.

    Raises NoSubnetReturned when no subnet matches and a generic
    Exception when more than one is returned.
    """
    vpc_conn = VPCConnection()
    # Fixed filter key: EC2 filter names are hyphenated ('subnet-id');
    # 'subnet_id' matches nothing.
    subnets = vpc_conn.get_all_subnets(filters={'subnet-id': subnet_id})
    if len(subnets) == 1:
        vpc_id = subnets[0].vpc_id
        return vpc_id
    elif len(subnets) == 0:
        raise NoSubnetReturned(
            "No subnets returned for: {}".format(subnet_id))
    else:
        raise Exception("More than 1 subnet returned")
def _connect_to_region(self, **kwargs):
    """Return a VPCConnection for self.region.

    When self.region is already a region object (self._isRegionInfo),
    connect directly; otherwise resolve the region name via all_region().
    Raises ValueError when the name matches no known region — previously
    this fell off the end of the function and returned None, deferring
    the failure to the caller.
    """
    if self._isRegionInfo:
        return VPCConnection(aws_access_key_id=self.aws_access_key_id,
                             aws_secret_access_key=self.aws_secret_access_key,
                             **kwargs)
    for region in self.all_region():
        if region.name == self.region:
            # Cache the resolved RegionInfo for subsequent calls.
            self.region = region
            return VPCConnection(aws_access_key_id=self.aws_access_key_id,
                                 aws_secret_access_key=self.aws_secret_access_key,
                                 region=self.region,
                                 **kwargs)
    raise ValueError("Unknown region: {0!r}".format(self.region))
class TestVPCConnection(unittest.TestCase):
    """Test class for `boto.vpc.VPCConnection`"""

    def setUp(self):
        """Setup method to initialize vpc_connection object"""
        super(TestVPCConnection, self).setUp()
        self.vpc_connection = VPCConnection(
            aws_access_key_id='aws_access_key_id',
            aws_secret_access_key='aws_secret_access_key')

    def test_detach_internet_gateway(self):
        """Tests detach_internet_gateway with all valid parameters"""
        internet_gateway_id = 'mock_gateway_id'
        vpc_id = 'mock_vpc_id'

        # Stub get_status to check the action name and request parameters.
        def get_status(status, params):
            if status == "DetachInternetGateway" and \
                    params["InternetGatewayId"] == internet_gateway_id and \
                    params["VpcId"] == vpc_id:
                return True
            else:
                return False

        self.vpc_connection.get_status = get_status
        status = self.vpc_connection.detach_internet_gateway(
            internet_gateway_id, vpc_id)
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(True, status)

    def test_replace_route_table_association(self):
        """Tests replace_route_table_assocation with all valid parameters"""
        association_id = 'mock_association_id'
        route_table_id = 'mock_route_table_id'

        def get_status(status, params):
            if status == "ReplaceRouteTableAssociation" and \
                    params["AssociationId"] == association_id and \
                    params["RouteTableId"] == route_table_id:
                return True
            else:
                return False

        self.vpc_connection.get_status = get_status
        # NOTE: "assocation" is boto's own (misspelled) method name.
        status = self.vpc_connection.replace_route_table_assocation(
            association_id, route_table_id)
        self.assertEqual(True, status)
def create_nacl(vpc_id, region, network_aclname):
    """Create and tag a network ACL in the given VPC; return its id."""
    conn = VPCConnection(aws_access_key_id=access_key,
                         aws_secret_access_key=secret_key)
    nacl = conn.create_network_acl(vpc_id)
    time.sleep(1)  # let the new ACL become visible before tagging
    tagger = boto.ec2.connect_to_region(
        region,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key)
    tagger.create_tags(nacl.id, {
        "Name": network_aclname,
        "Project": 'LiveLiveProd'
    })
    return nacl.id
class TestVPCConnection(unittest.TestCase):
    """Test class for `boto.vpc.VPCConnection`"""

    def setUp(self):
        """Setup method to initialize vpc_connection object"""
        super(TestVPCConnection, self).setUp()
        self.vpc_connection = VPCConnection(
            aws_access_key_id='aws_access_key_id',
            aws_secret_access_key='aws_secret_access_key')

    def test_detach_internet_gateway(self):
        """Tests detach_internet_gateway with all valid parameters"""
        internet_gateway_id = 'mock_gateway_id'
        vpc_id = 'mock_vpc_id'

        # Stub get_status to verify action name and request parameters.
        def get_status(status, params):
            if status == "DetachInternetGateway" and \
                    params["InternetGatewayId"] == internet_gateway_id and \
                    params["VpcId"] == vpc_id:
                return True
            else:
                return False

        self.vpc_connection.get_status = get_status
        status = self.vpc_connection.detach_internet_gateway(
            internet_gateway_id, vpc_id)
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(True, status)
def aws_connect(creds_fname=None):
    """Return (ec2conn, vpcconn) built from a boto credentials file.

    When no file name is given, the path comes from the cloudsim config.
    A region name of 'nova' selects the OpenStack endpoint.
    """
    credentials_ec2 = creds_fname
    if not credentials_ec2:
        config = get_cloudsim_config()
        # log("config: %s" % config)
        credentials_ec2 = config['boto_path']
    (ec2_region_name, aws_access_key_id,
     aws_secret_access_key, region_endpoint) = read_boto_file(credentials_ec2)
    if ec2_region_name == 'nova':
        # TODO: remove hardcoded OpenStack endpoint
        region = RegionInfo(None, 'cloudsim', region_endpoint)  # 172.16.0.201
        openstack_opts = dict(is_secure=False,
                              region=region,
                              port=8773,
                              path='/services/Cloud')
        ec2conn = EC2Connection(aws_access_key_id, aws_secret_access_key,
                                **openstack_opts)
        vpcconn = VPCConnection(aws_access_key_id, aws_secret_access_key,
                                **openstack_opts)
    else:
        region = RegionInfo(None, ec2_region_name, region_endpoint)
        ec2conn = boto.connect_ec2(aws_access_key_id,
                                   aws_secret_access_key,
                                   region=region)
        vpcconn = boto.connect_vpc(aws_access_key_id,
                                   aws_secret_access_key,
                                   region=region)
    return ec2conn, vpcconn
def create_subnet(vpc_id, cidr_block, availability_zone, subnet_name, region):
    """Create a subnet in the VPC/AZ, tag it, and return its id."""
    conn = VPCConnection(aws_access_key_id=access_key,
                         aws_secret_access_key=secret_key)
    subnet = conn.create_subnet(vpc_id=vpc_id,
                                cidr_block=cidr_block,
                                availability_zone=availability_zone)
    tagger = boto.ec2.connect_to_region(
        region,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key)
    time.sleep(1)  # let the new subnet become visible before tagging
    tagger.create_tags(subnet.id, {
        "Name": subnet_name,
        "Project": 'LiveLiveProd'
    })
    return subnet.id
def get_subnet_availability_zone(self, subnet_id):
    """Return the availability zone of the given subnet.

    NOTE(review): when the filter matches zero or more than one subnet,
    this falls through and implicitly returns None — confirm callers
    handle that (sibling lookups raise instead).
    """
    self.log.info(
        "getting zone for subnet {subnet_id}".format(subnet_id=subnet_id))
    vpc_conn = VPCConnection()
    filters = {'subnet-id': subnet_id}
    subnets = vpc_conn.get_all_subnets(filters=filters)
    if len(subnets) == 1:
        availability_zone = subnets[0].availability_zone
        log_message = 'Subnet {subnet_id} is in ' \
            'availability zone {availability_zone}'
        self.log.info(log_message.format(
            subnet_id=subnet_id, availability_zone=availability_zone))
        return availability_zone
class TestRoute53PrivateZone(unittest.TestCase):
    """Integration test: create a Route53 private zone bound to a VPC."""

    @classmethod
    def setUpClass(self):
        """Provision a uniquely-named VPC and clear any stale zone."""
        suffix = str(int(time.time()))
        self.base_domain = 'boto-private-zone-test-%s.com' % suffix
        self.route53 = Route53Connection()
        self.vpc = VPCConnection()
        self.test_vpc = self.vpc.create_vpc(cidr_block='10.11.0.0/16')
        # Tag the VPC so it is easy to find if cleanup fails.
        self.test_vpc.add_tag("Name", self.base_domain)
        self.zone = self.route53.get_zone(self.base_domain)
        if self.zone is not None:
            self.zone.delete()

    def test_create_private_zone(self):
        """A private hosted zone can be created against the test VPC."""
        self.zone = self.route53.create_hosted_zone(
            self.base_domain,
            private_zone=True,
            vpc_id=self.test_vpc.id,
            vpc_region='us-east-1')

    @classmethod
    def tearDownClass(self):
        """Remove the zone (if created) and the test VPC."""
        if self.zone is not None:
            self.zone.delete()
        self.test_vpc.delete()
def test_db_subnet_group(self):
    """End-to-end exercise of RDS DB subnet groups: create, modify,
    fetch, delete — then tear down the temporary VPC and subnets."""
    vpc_api = VPCConnection()
    rds_api = RDSConnection()
    vpc = vpc_api.create_vpc('10.0.0.0/16')
    az_list = vpc_api.get_all_zones(filters={'state': 'available'})
    subnet = list()
    n = 0
    # Create one /24 per available AZ; some AZs may reject subnet
    # creation, so failures are deliberately ignored (best effort).
    for az in az_list:
        try:
            subnet.append(
                vpc_api.create_subnet(vpc.id, '10.0.' + str(n) + '.0/24',
                                      availability_zone=az.name))
            n = n + 1
        except:
            pass
    # Unique group name per run to avoid collisions between test runs.
    grp_name = 'db_subnet_group' + str(int(time.time()))
    subnet_group = rds_api.create_db_subnet_group(
        grp_name, grp_name, [subnet[0].id, subnet[1].id])
    if not _is_ok(subnet_group, vpc.id, grp_name,
                  [subnet[0].id, subnet[1].id]):
        raise Exception("create_db_subnet_group returned bad values")
    # Modify only the description; membership must be unchanged.
    rds_api.modify_db_subnet_group(grp_name, description='new description')
    subnet_grps = rds_api.get_all_db_subnet_groups(name=grp_name)
    if not _is_ok(subnet_grps[0], vpc.id, 'new description',
                  [subnet[0].id, subnet[1].id]):
        raise Exception(
            "modifying the subnet group desciption returned bad values")
    # Modify only the subnet membership; description must be unchanged.
    rds_api.modify_db_subnet_group(grp_name,
                                   subnet_ids=[subnet[1].id, subnet[2].id])
    subnet_grps = rds_api.get_all_db_subnet_groups(name=grp_name)
    if not _is_ok(subnet_grps[0], vpc.id, 'new description',
                  [subnet[1].id, subnet[2].id]):
        raise Exception(
            "modifying the subnet group subnets returned bad values")
    rds_api.delete_db_subnet_group(subnet_group.name)
    # After deletion the fetch should fail; a successful fetch means the
    # delete did not take effect.
    try:
        rds_api.get_all_db_subnet_groups(name=grp_name)
        raise Exception(subnet_group.name +
                        " still accessible after delete_db_subnet_group")
    except:
        pass
    # Tear down the temporary subnets and VPC created above.
    while n > 0:
        n = n - 1
        vpc_api.delete_subnet(subnet[n].id)
    vpc_api.delete_vpc(vpc.id)
def setUp(self):
    """Create the VPCConnection under test using placeholder credentials."""
    super(TestVPCConnection, self).setUp()
    fake_credentials = {
        'aws_access_key_id': "aws_access_key_id",
        'aws_secret_access_key': "aws_secret_access_key",
    }
    self.vpc_connection = VPCConnection(**fake_credentials)
def fetch_environment(cls, vpc_id=None, environment_name=None):
    """
    Returns an instance of this class for the specified VPC,
    or None if it does not exist
    """
    conn = VPCConnection()
    if vpc_id:
        matches = conn.get_all_vpcs(vpc_ids=[vpc_id])
    elif environment_name:
        matches = conn.get_all_vpcs(filters={"tag:Name": environment_name})
    else:
        raise VPCEnvironmentError("Expect vpc_id or environment_name")

    if not matches:
        return None
    found = matches[0]
    return cls(found.tags["Name"], found.tags["type"], found)
def discover_vpc(self, vpc_id):
    """Resolve a VPC Name tag (case-insensitive) to its VPC id.

    Returns the matching VPC's id, or '' when no VPC carries the name
    or when credentials/AWS access fail.
    """
    try:
        AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY = self.find_aws_creds()
    except Exception:
        logger.info("Error: {0}: Cannot retrieve boto credentials".format(__name__))
        # Fixed: without credentials the original fell through and hit a
        # NameError on the undefined key variables below.
        return ''
    try:
        conn_vpc = VPCConnection(aws_access_key_id=AWS_ACCESS_KEY_ID,
                                 aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
        for vpc in conn_vpc.get_all_vpcs():
            if vpc.tags['Name'].lower() == vpc_id:
                # Fixed: return on first match — the original kept
                # iterating and a later non-match reset the result to ''.
                return vpc.id
        return ''
    except Exception:
        # Also reached when a VPC lacks a 'Name' tag (KeyError).
        logger.info("Error: {0}: conn_vpc cannot VPCConnection()".format(__name__))
        return ''
def create_peering_connections(peering_configs):
    """ create vpc peering configuration from the peering config dictionary"""
    vpc_conn = VPCConnection()
    for peering in peering_configs.keys():
        vpc_map = peering_configs[peering]['vpc_map']
        vpc_metanetwork_map = peering_configs[peering][
            'vpc_metanetwork_map']
        vpc_ids = [vpc.vpc.id for vpc in vpc_map.values()]
        # An active peering may exist in either direction, so query both
        # accepter/requester orderings and concatenate the results.
        existing_peerings = vpc_conn.get_all_vpc_peering_connections(
            filters=[('status-code', 'active'),
                     ('accepter-vpc-info.vpc-id', vpc_ids[0]),
                     ('requester-vpc-info.vpc-id', vpc_ids[1])]
        ) + vpc_conn.get_all_vpc_peering_connections(
            filters=[('status-code', 'active'),
                     ('accepter-vpc-info.vpc-id', vpc_ids[1]),
                     ('requester-vpc-info.vpc-id', vpc_ids[0])])
        # create peering when peering doesn't exist
        if not existing_peerings:
            peering_conn = vpc_conn.create_vpc_peering_connection(*vpc_ids)
            # Self-accept: both VPCs belong to this account.
            vpc_conn.accept_vpc_peering_connection(peering_conn.id)
            logging.info("create new peering connection %s for %s",
                         peering_conn.id, peering)
        else:
            peering_conn = existing_peerings[0]
            logging.info("peering connection %s exists for %s",
                         existing_peerings[0].id, peering)
        # Routes are created in both VPCs regardless of whether the
        # peering itself was pre-existing.
        DiscoVPC.create_peering_routes(vpc_conn, vpc_map,
                                       vpc_metanetwork_map, peering_conn)
def vpc_connect(region_name=config['region']): region = boto.ec2.get_region(region_name=region_name) print region vpcconn = VPCConnection(aws_access_key_id=KEY, aws_secret_access_key=SECRET, region=region) print vpcconn return vpcconn
def get_vpc(connection, aws_access_key_id, aws_secret_access_key):
    """Return a per-region cached VPCConnection, creating it on first use."""
    global _vpcs
    region_name = connection.region.name
    cached = _vpcs.get(region_name)
    if cached:
        return cached
    # Cache miss: build and memoize a connection for this region.
    _vpcs[region_name] = VPCConnection(
        region=connection.region,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key)
    return _vpcs[region_name]
def provision_pinger(args):
    """Provision a full Pinger environment from a config object:
    IAM users/policies, VPC + subnet + gateway + routes, security
    groups, an ELB and an autoscaler. On any AWS/S3 error, prints the
    error and runs cleanup(config)."""
    config = args.config
    process_config(config)
    # Pull the per-subsystem sections out of the processed config.
    aws_config = config["aws_config"]
    iam_config = config["iam_config"]
    s3_config = config["s3_config"]
    vpc_config = config["vpc_config"]
    as_config = config["autoscale_config"]
    elb_config = config["elb_config"]
    misc_config = config["misc"]
    load_config_from_s3(s3_config)
    print "Provisioning Pinger %s" % vpc_config["name"]
    # create connection
    conn = VPCConnection(region=aws_config["region"])
    # create vpc
    try:
        create_iam_users_and_policies(aws_config["region_name"],
                                      vpc_config["name"], iam_config)
        update_pinger_cfg(config)
        upload_pinger_cfg(vpc_config["name"], s3_config)
        create_nacho_init_sh(config)
        # Network layer: VPC, subnet, internet gateway, route table.
        vpc = create_vpc(conn, vpc_config["name"],
                         vpc_config["vpc_cidr_block"],
                         vpc_config["instance_tenancy"])
        subnet = create_subnet(conn, vpc, vpc_config["name"] + "-SN",
                               vpc_config["subnet_cidr_block"],
                               vpc_config["availability_zone"])
        ig = create_ig(conn, vpc, vpc_config["name"] + "-IG")
        rt = update_route_table(conn, vpc, ig, vpc_config["name"] + "-RT")
        # ELB security group plus its ingress/egress rules.
        elb_sg_config = config["elb_config"]["sg_config"]
        elb_sg = create_sg(
            conn, vpc, vpc_config["name"] + elb_sg_config["name"] + "-SG",
            elb_sg_config["description"] + " for " + vpc_config["name"])
        add_rules_to_sg(conn, elb_sg, elb_sg_config["ingress-rules"],
                        True, misc_config["jumphost"])
        add_rules_to_sg(conn, elb_sg, elb_sg_config["egress-rules"],
                        False, misc_config["jumphost"])
        elb = create_elb(aws_config["region_name"], vpc, subnet, elb_sg,
                         vpc_config["name"] + "-ELB", elb_config,
                         s3_config["first_cert_pem"])
        # Instance security group plus its ingress/egress rules.
        ins_sg_config = config["autoscale_config"]["sg_config"]
        ins_sg = create_sg(
            conn, vpc, vpc_config["name"] + ins_sg_config["name"] + "-SG",
            ins_sg_config["description"] + " for " + vpc_config["name"])
        add_rules_to_sg(conn, ins_sg, ins_sg_config["ingress-rules"],
                        True, misc_config["jumphost"])
        add_rules_to_sg(conn, ins_sg, ins_sg_config["egress-rules"],
                        False, misc_config["jumphost"])
        ascaler = create_autoscaler(aws_config["region_name"], vpc, elb,
                                    subnet, ins_sg,
                                    vpc_config["name"] + "-AS",
                                    aws_config, as_config)
    except (BotoServerError, S3ResponseError, EC2ResponseError) as e:
        # Best-effort rollback: report the AWS error, then tear down
        # whatever was created so far.
        print "Error :%s(%s):%s" % (e.error_code, e.status, e.message)
        print traceback.format_exc()
        cleanup(config)
def _get_vpc_details(self):
    """Populate self.vpcs with {vpc_id: tag_dict} for every VPC.

    Each VPC's entry contains its state, CIDR block, and all of its
    tags with lower-cased keys and values.
    """
    # Connect to AWS for VPC
    vpc_conn = VPCConnection()
    vpcs_data = vpc_conn.get_all_vpcs()
    # Fixed: the original rebound self.vpcs inside the loop, so only the
    # last VPC survived. Accumulate into one dict instead.
    vpcs = {}
    for vpc in vpcs_data:
        tags = {'state': vpc.state, 'cidr': vpc.cidr_block}
        for key, value in vpc.tags.items():
            tags[key.lower()] = value.lower()
        vpcs[vpc.id] = tags
    self.vpcs = vpcs
def getOtherResourceApartFromEC2Instances(self):
    """Collect auxiliary account resources (network interfaces, EIPs,
    key pairs, security groups, volumes, AMIs, VPCs) for reporting."""
    # all excpetion will be ignored since other data is only for info sake
    try:
        self.netIntf = self.conn.get_all_network_interfaces()
    except:
        pass
    try:
        self.eipList = self.conn.get_all_addresses()
    except:
        pass
    try:
        self.keypair = self.conn.get_all_key_pairs()
    except:
        pass
    try:
        self.sgList = self.conn.get_all_security_groups()
    except:
        pass
    try:
        self.volList = self.conn.get_all_volumes()
    except:
        pass
    # For every instance, make sure its AMI is present in self.amiList,
    # fetching any image not already collected.
    for key in self.insts.keys():
        if len(self.insts[key]) > 0:
            for inst in self.insts[key]:
                img_present = False
                for img in self.amiList:
                    if img.id == inst.image_id:
                        img_present = True
                        break
                if img_present == False:
                    self.amiList.append(
                        self.conn.get_image(
                            inst.botoInstInfo.image_id))
    # VPC listing needs its own connection; failures are non-fatal.
    try:
        vpcConn = VPCConnection(
            aws_access_key_id=self.conf.access_key_id,
            aws_secret_access_key=self.conf.access_key_sec,
            debug=10)
        self.vpcList = vpcConn.get_all_vpcs()
    except:
        pass
def _configure_environment(self):
    """Create a new disco style environment VPC"""
    vpc_cidr = self.get_config("vpc_cidr")

    # Create VPC and tag it with its environment name and type.
    vpc_conn = VPCConnection()
    self.vpc = vpc_conn.create_vpc(self.get_config("vpc_cidr"))
    # keep_trying: tagging can fail transiently right after creation.
    keep_trying(300, self.vpc.add_tag, "Name", self.environment_name)
    keep_trying(300, self.vpc.add_tag, "type", self.environment_type)
    logging.debug("vpc: %s", self.vpc)

    dhcp_options = self._configure_dhcp()
    self.vpc.connection.associate_dhcp_options(dhcp_options.id, self.vpc.id)

    # Enable DNS
    vpc_conn.modify_vpc_attribute(self.vpc.id, enable_dns_support=True)
    vpc_conn.modify_vpc_attribute(self.vpc.id, enable_dns_hostnames=True)

    # Create metanetworks (subnets, route_tables and security groups)
    for network in self.networks.itervalues():
        network.create()

    # Configure security group rules
    for network in self.networks.values():
        self._add_sg_rules(network)

    # Set up security group rules
    self._open_customer_ports()

    # Allow ICMP (ping, traceroute & etc) and DNS traffic for all subnets
    for network in self.networks.itervalues():
        self.vpc.connection.authorize_security_group(
            group_id=network.security_group.id,
            ip_protocol="icmp",
            from_port=-1, to_port=-1,
            cidr_ip=vpc_cidr)
        self.vpc.connection.authorize_security_group(
            group_id=network.security_group.id,
            ip_protocol="udp",
            from_port=53, to_port=53,
            cidr_ip=vpc_cidr)

    # Setup internet gateway
    internet_gateway = self.vpc.connection.create_internet_gateway()
    self.vpc.connection.attach_internet_gateway(internet_gateway.id,
                                                self.vpc.id)
    logging.debug("internet_gateway: %s", internet_gateway)
    self._add_igw_routes(internet_gateway)

    self._attach_vgw()
    self.configure_notifications()
    # Establish any configured peerings for the new VPC.
    DiscoVPC.create_peering_connections(
        DiscoVPC.parse_peerings_config(self.vpc.id))
    self.rds.update_all_clusters_in_vpc()
def get_vpc_id(self, env): """ Return VPC ID from Stack Name """ # VPC connection self.api_call() region = boto.ec2.get_region("us-west-2") vpc_conn = VPCConnection(region=region) # get dict of VPC vpc_dict = {} for vpc in vpc_conn.get_all_vpcs(): vpc_dict[vpc.tags.get("Env")] = vpc.id # search Env in VPC tag name if env not in vpc_dict: print "VPC name not found." sys.exit(1) else: return vpc_dict[env]
def get_vpc_id(self, env): """ Return VPC ID from Stack Name """ # VPC connection self.api_call() region = boto.ec2.get_region('us-west-2') vpc_conn = VPCConnection(region=region) # get dict of VPC vpc_dict = {} for vpc in vpc_conn.get_all_vpcs(): vpc_dict[vpc.tags.get('Env')] = vpc.id # search Env in VPC tag name if env not in vpc_dict: print "VPC name not found." sys.exit(1) else: return vpc_dict[env]
def init_connection(conf): # Load general configuration file cnf = ConfigParser() try: cnf.read(conf) region_os = EC2RegionInfo(endpoint=cnf.get('outscale', 'endpoint')) ows = VPCConnection(cnf.get('outscale', 'access_key'), cnf.get('outscale', 'secret_key'), region=region_os) return ows except Exception as e: print 'error in the configuration file: ', e return None
def test_delete_vpc_peering_connection(self):
    """Deleting a peering connection issues DeleteVpcPeeringConnection
    (and not a plain DeleteVpc) against the API."""
    vpc_conn = VPCConnection(aws_access_key_id='aws_access_key_id',
                             aws_secret_access_key='aws_secret_access_key')

    # First request: list peering connections from canned XML.
    mock_response = mock.Mock()
    mock_response.read.return_value = self.DESCRIBE_VPC_PEERING_CONNECTIONS
    mock_response.status = 200
    vpc_conn.make_request = mock.Mock(return_value=mock_response)
    vpc_peering_connections = vpc_conn.get_all_vpc_peering_connections()

    # assertEqual: assertEquals is a deprecated alias.
    self.assertEqual(1, len(vpc_peering_connections))
    vpc_peering_connection = vpc_peering_connections[0]

    # Second request: the delete call itself, also from canned XML.
    mock_response = mock.Mock()
    mock_response.read.return_value = self.DELETE_VPC_PEERING_CONNECTION
    mock_response.status = 200
    vpc_conn.make_request = mock.Mock(return_value=mock_response)

    self.assertEqual(True, vpc_peering_connection.delete())

    self.assertIn('DeleteVpcPeeringConnection',
                  vpc_conn.make_request.call_args_list[0][0])
    self.assertNotIn('DeleteVpc',
                     vpc_conn.make_request.call_args_list[0][0])
def list_peerings(vpc_id=None, include_failed=False):
    """ Return list of live vpc peering connection id.
        If vpc_id is given, return only that vpcs peerings
        Peerings that cannot be manipulated are ignored.
    """
    conn = VPCConnection()
    if vpc_id:
        # A peering may list the VPC as requester or accepter.
        as_requester = conn.get_all_vpc_peering_connections(
            filters=[('requester-vpc-info.vpc-id', vpc_id)])
        as_accepter = conn.get_all_vpc_peering_connections(
            filters=[('accepter-vpc-info.vpc-id', vpc_id)])
        peerings = as_requester + as_accepter
    else:
        peerings = conn.get_all_vpc_peering_connections()

    allowed_states = LIVE_PEERING_STATES + (
        ["failed"] if include_failed else [])
    return [p for p in peerings if p.status_code in allowed_states]
def create_peering_connections(peering_configs):
    """ create vpc peering configuration from the peering config dictionary"""
    vpc_conn = VPCConnection()
    for peering in peering_configs.keys():
        vpc_map = peering_configs[peering]['vpc_map']
        vpc_metanetwork_map = peering_configs[peering]['vpc_metanetwork_map']
        vpc_ids = [vpc.vpc.id for vpc in vpc_map.values()]
        # An active peering may exist in either direction, so query both
        # accepter/requester orderings and concatenate the results.
        existing_peerings = vpc_conn.get_all_vpc_peering_connections(
            filters=[
                ('status-code', 'active'),
                ('accepter-vpc-info.vpc-id', vpc_ids[0]),
                ('requester-vpc-info.vpc-id', vpc_ids[1])
            ]
        ) + vpc_conn.get_all_vpc_peering_connections(
            filters=[
                ('status-code', 'active'),
                ('accepter-vpc-info.vpc-id', vpc_ids[1]),
                ('requester-vpc-info.vpc-id', vpc_ids[0])
            ]
        )
        # create peering when peering doesn't exist
        if not existing_peerings:
            peering_conn = vpc_conn.create_vpc_peering_connection(*vpc_ids)
            # Self-accept: both VPCs belong to this account.
            vpc_conn.accept_vpc_peering_connection(peering_conn.id)
            logging.info("create new peering connection %s for %s",
                         peering_conn.id, peering)
        else:
            peering_conn = existing_peerings[0]
            logging.info("peering connection %s exists for %s",
                         existing_peerings[0].id, peering)
        # Routes are created in both VPCs regardless of whether the
        # peering itself was pre-existing.
        DiscoVPC.create_peering_routes(vpc_conn, vpc_map,
                                       vpc_metanetwork_map, peering_conn)
def list_peerings(vpc_id=None, include_failed=False):
    """ Return list of live vpc peering connection id.
        If vpc_id is given, return only that vpcs peerings
        Peerings that cannot be manipulated are ignored.
    """
    conn = VPCConnection()
    if not vpc_id:
        candidates = conn.get_all_vpc_peering_connections()
    else:
        # The VPC can appear on either side of a peering.
        candidates = conn.get_all_vpc_peering_connections(
            filters=[('requester-vpc-info.vpc-id', vpc_id)]
        ) + conn.get_all_vpc_peering_connections(
            filters=[('accepter-vpc-info.vpc-id', vpc_id)]
        )
    wanted = list(LIVE_PEERING_STATES)
    if include_failed:
        wanted.append("failed")
    return [p for p in candidates if p.status_code in wanted]
def getOtherResourceApartFromEC2Instances(self):
    # Best-effort inventory of non-instance resources (network interfaces,
    # EIPs, key pairs, security groups, volumes, AMIs, VPCs).
    # All exceptions are deliberately ignored since this data is only
    # informational.
    try:
        self.netIntf = self.conn.get_all_network_interfaces()
    except:
        pass
    try:
        self.eipList = self.conn.get_all_addresses()
    except:
        pass
    try:
        self.keypair = self.conn.get_all_key_pairs()
    except:
        pass
    try:
        self.sgList = self.conn.get_all_security_groups()
    except:
        pass
    try:
        self.volList = self.conn.get_all_volumes()
    except:
        pass
    # Collect the AMI for every instance exactly once (dedupe by image id).
    for key in self.insts.keys():
        if len(self.insts[key]) > 0:
            for inst in self.insts[key]:
                img_present = False
                for img in self.amiList:
                    if img.id == inst.image_id:
                        img_present = True
                        break
                if img_present == False:
                    # NOTE(review): the membership check above uses
                    # inst.image_id but the fetch below uses
                    # inst.botoInstInfo.image_id — presumably the same value;
                    # confirm, otherwise duplicates can be appended.
                    self.amiList.append(self.conn.get_image(inst.botoInstInfo.image_id))
    # VPC listing needs its own connection, built from the stored credentials.
    try:
        vpcConn = VPCConnection(aws_access_key_id=self.conf.access_key_id, aws_secret_access_key=self.conf.access_key_sec, debug=10)
        self.vpcList = vpcConn.get_all_vpcs()
    except:
        pass
def _configure_environment(self):
    """Create a new disco style environment VPC"""
    vpc_cidr = self.get_config("vpc_cidr")

    # Create VPC
    # NOTE(review): get_config("vpc_cidr") is called twice — vpc_cidr above
    # could be reused here.
    vpc_conn = VPCConnection()
    self.vpc = vpc_conn.create_vpc(self.get_config("vpc_cidr"))
    # Tagging shortly after creation can race AWS eventual consistency,
    # hence the retries.
    keep_trying(300, self.vpc.add_tag, "Name", self.environment_name)
    keep_trying(300, self.vpc.add_tag, "type", self.environment_type)
    logging.debug("vpc: %s", self.vpc)
    dhcp_options = self._configure_dhcp()
    self.vpc.connection.associate_dhcp_options(dhcp_options.id, self.vpc.id)

    # Enable DNS
    vpc_conn.modify_vpc_attribute(self.vpc.id, enable_dns_support=True)
    vpc_conn.modify_vpc_attribute(self.vpc.id, enable_dns_hostnames=True)

    # Create metanetworks (subnets, route_tables and security groups)
    for network in self.networks.itervalues():
        network.create()

    # Configure security group rules
    # NOTE(review): mixes .values() and .itervalues() over self.networks —
    # equivalent in Python 2, but inconsistent style.
    for network in self.networks.values():
        self._add_sg_rules(network)

    # Set up security group rules
    self._open_customer_ports()

    # Allow ICMP (ping, traceroute & etc) and DNS traffic for all subnets
    for network in self.networks.itervalues():
        self.vpc.connection.authorize_security_group(
            group_id=network.security_group.id,
            ip_protocol="icmp",
            from_port=-1,
            to_port=-1,
            cidr_ip=vpc_cidr
        )
        self.vpc.connection.authorize_security_group(
            group_id=network.security_group.id,
            ip_protocol="udp",
            from_port=53,
            to_port=53,
            cidr_ip=vpc_cidr
        )

    # Setup internet gateway
    internet_gateway = self.vpc.connection.create_internet_gateway()
    self.vpc.connection.attach_internet_gateway(internet_gateway.id, self.vpc.id)
    logging.debug("internet_gateway: %s", internet_gateway)
    self._add_igw_routes(internet_gateway)

    self._attach_vgw()
    self.configure_notifications()
    # Peer with other VPCs as configured, then refresh RDS clusters so they
    # pick up the new network layout.
    DiscoVPC.create_peering_connections(DiscoVPC.parse_peerings_config(self.vpc.id))
    self.rds.update_all_clusters_in_vpc()
def test_db_subnet_group(self):
    """Exercise the RDS DB subnet group lifecycle against a fresh VPC.

    Creates a VPC with one subnet per available AZ, then creates,
    modifies, reads back, and deletes a DB subnet group, validating each
    step with _is_ok. Cleans up the subnets and VPC at the end.
    """
    vpc_api = VPCConnection()
    rds_api = RDSConnection()
    vpc = vpc_api.create_vpc('10.0.0.0/16')
    az_list = vpc_api.get_all_zones(filters={'state': 'available'})
    subnet = list()
    n = 0
    for az in az_list:
        # Best effort: some AZs may reject subnet creation; skip them.
        try:
            subnet.append(vpc_api.create_subnet(
                vpc.id, '10.0.' + str(n) + '.0/24', availability_zone=az.name))
            n = n + 1
        except Exception:
            pass
    # Timestamp suffix keeps the group name unique across test runs.
    grp_name = 'db_subnet_group' + str(int(time.time()))
    subnet_group = rds_api.create_db_subnet_group(
        grp_name, grp_name, [subnet[0].id, subnet[1].id])
    if not _is_ok(subnet_group, vpc.id, grp_name, [subnet[0].id, subnet[1].id]):
        raise Exception("create_db_subnet_group returned bad values")
    rds_api.modify_db_subnet_group(grp_name, description='new description')
    subnet_grps = rds_api.get_all_db_subnet_groups(name=grp_name)
    if not _is_ok(subnet_grps[0], vpc.id, 'new description',
                  [subnet[0].id, subnet[1].id]):
        raise Exception("modifying the subnet group desciption returned bad values")
    rds_api.modify_db_subnet_group(grp_name, subnet_ids=[subnet[1].id, subnet[2].id])
    subnet_grps = rds_api.get_all_db_subnet_groups(name=grp_name)
    if not _is_ok(subnet_grps[0], vpc.id, 'new description',
                  [subnet[1].id, subnet[2].id]):
        raise Exception("modifying the subnet group subnets returned bad values")
    rds_api.delete_db_subnet_group(subnet_group.name)
    # BUG FIX: previously the "still accessible" raise was inside the same
    # try whose bare except swallowed it, so a failed delete could never be
    # reported. The lookup is expected to raise; succeeding means the group
    # survived deletion.
    try:
        rds_api.get_all_db_subnet_groups(name=grp_name)
    except Exception:
        pass
    else:
        raise Exception(subnet_group.name +
                        " still accessible after delete_db_subnet_group")
    # Tear down every subnet we created, then the VPC itself.
    while n > 0:
        n = n - 1
        vpc_api.delete_subnet(subnet[n].id)
    vpc_api.delete_vpc(vpc.id)
def create_instance_args():
    """
    Looks up security group, subnet
    and returns arguments to pass into
    ec2.run_instances() including
    user data
    """
    # The instance is launched into the first subnet tagged for this
    # CloudFormation stack and play.
    vpc = VPCConnection()
    subnet = vpc.get_all_subnets(
        filters={
            'tag:aws:cloudformation:stack-name': stack_name,
            'tag:play': args.play}
    )
    if len(subnet) < 1:
        sys.stderr.write("ERROR: Expected at least one subnet, got {}\n".format(
            len(subnet)))
        sys.exit(1)
    subnet_id = subnet[0].id
    vpc_id = subnet[0].vpc_id

    security_group_id = get_instance_sec_group(vpc_id)

    # If an identity file is supplied its contents are embedded in the user
    # data so the instance can clone the secure repo over SSH.
    if args.identity:
        config_secure = 'true'
        with open(args.identity) as f:
            identity_contents = f.read()
    else:
        config_secure = 'false'
        identity_contents = "dummy"

    # cloud-init user-data script: clones configuration (and optionally the
    # secure/private repos), builds an extra-vars file, then runs the play
    # locally and stops all edX services before the AMI snapshot.
    user_data = """#!/bin/bash
set -x
set -e
exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
base_dir="/var/tmp/edx-cfg"
extra_vars="$base_dir/extra-vars-$$.yml"
secure_identity="$base_dir/secure-identity"
git_ssh="$base_dir/git_ssh.sh"
configuration_version="{configuration_version}"
configuration_secure_version="{configuration_secure_version}"
configuration_private_version="{configuration_private_version}"
environment="{environment}"
deployment="{deployment}"
play="{play}"
config_secure={config_secure}
git_repo_name="configuration"
git_repo="https://github.com/edx/$git_repo_name"
git_repo_secure="{configuration_secure_repo}"
git_repo_secure_name=$(basename $git_repo_secure .git)
git_repo_private="{configuration_private_repo}"
git_repo_private_name=$(basename $git_repo_private .git)
secure_vars_file={secure_vars_file}
environment_deployment_secure_vars="$base_dir/$git_repo_secure_name/ansible/vars/{environment}-{deployment}.yml"
deployment_secure_vars="$base_dir/$git_repo_secure_name/ansible/vars/{deployment}.yml"
instance_id=\\
$(curl http://169.254.169.254/latest/meta-data/instance-id 2>/dev/null)
instance_ip=\\
$(curl http://169.254.169.254/latest/meta-data/local-ipv4 2>/dev/null)
instance_type=\\
$(curl http://169.254.169.254/latest/meta-data/instance-type 2>/dev/null)
playbook_dir="$base_dir/{playbook_dir}"

if $config_secure; then
    git_cmd="env GIT_SSH=$git_ssh git"
else
    git_cmd="git"
fi

ANSIBLE_ENABLE_SQS=true
SQS_NAME={queue_name}
SQS_REGION=us-east-1
SQS_MSG_PREFIX="[ $instance_id $instance_ip $environment-$deployment $play ]"
PYTHONUNBUFFERED=1
HIPCHAT_TOKEN={hipchat_token}
HIPCHAT_ROOM={hipchat_room}
HIPCHAT_MSG_PREFIX="$environment-$deployment-$play: "
HIPCHAT_FROM="ansible-$instance_id"
HIPCHAT_MSG_COLOR=$(echo -e "yellow\\ngreen\\npurple\\ngray" | shuf | head -1)

# environment for ansible
export ANSIBLE_ENABLE_SQS SQS_NAME SQS_REGION SQS_MSG_PREFIX PYTHONUNBUFFERED
export HIPCHAT_TOKEN HIPCHAT_ROOM HIPCHAT_MSG_PREFIX HIPCHAT_FROM HIPCHAT_MSG_COLOR

if [[ ! -x /usr/bin/git || ! -x /usr/bin/pip ]]; then
    echo "Installing pkg dependencies"
    /usr/bin/apt-get update
    /usr/bin/apt-get install -y git python-pip python-apt \\
        git-core build-essential python-dev libxml2-dev \\
        libxslt-dev curl --force-yes
fi

rm -rf $base_dir
mkdir -p $base_dir
cd $base_dir

cat << EOF > $git_ssh
#!/bin/sh
exec /usr/bin/ssh -o StrictHostKeyChecking=no -i "$secure_identity" "\$@"
EOF

chmod 755 $git_ssh

if $config_secure; then
    cat << EOF > $secure_identity
{identity_contents}
EOF
fi

cat << EOF >> $extra_vars
---
# extra vars passed into
# abbey.py including versions
# of all the repositories
{extra_vars_yml}

# abbey will always run fake migrations
# this is so that the application can come
# up healthy
fake_migrations: true

disable_edx_services: true
COMMON_TAG_EC2_INSTANCE: true

# abbey should never take instances in
# and out of elbs
elb_pre_post: false
EOF

chmod 400 $secure_identity

$git_cmd clone $git_repo $git_repo_name
cd $git_repo_name
$git_cmd checkout $configuration_version
cd $base_dir

if $config_secure; then
    $git_cmd clone $git_repo_secure $git_repo_secure_name
    cd $git_repo_secure_name
    $git_cmd checkout $configuration_secure_version
    cd $base_dir
fi

if [[ ! -z $git_repo_private ]]; then
    $git_cmd clone $git_repo_private $git_repo_private_name
    cd $git_repo_private_name
    $git_cmd checkout $configuration_private_version
    cd $base_dir
fi

cd $base_dir/$git_repo_name
sudo pip install -r requirements.txt

cd $playbook_dir

if [[ -r "$deployment_secure_vars" ]]; then
    extra_args_opts+=" -e@$deployment_secure_vars"
fi

if [[ -r "$environment_deployment_secure_vars" ]]; then
    extra_args_opts+=" -e@$environment_deployment_secure_vars"
fi

if $secure_vars_file; then
    extra_args_opts+=" -e@$secure_vars_file"
fi

extra_args_opts+=" -e@$extra_vars"

ansible-playbook -vvvv -c local -i "localhost," $play.yml $extra_args_opts
ansible-playbook -vvvv -c local -i "localhost," stop_all_edx_services.yml $extra_args_opts

rm -rf $base_dir

""".format(
        hipchat_token=args.hipchat_api_token,
        hipchat_room=args.hipchat_room_id,
        configuration_version=args.configuration_version,
        configuration_secure_version=args.configuration_secure_version,
        configuration_secure_repo=args.configuration_secure_repo,
        configuration_private_version=args.configuration_private_version,
        configuration_private_repo=args.configuration_private_repo,
        environment=args.environment,
        deployment=args.deployment,
        play=args.play,
        playbook_dir=args.playbook_dir,
        config_secure=config_secure,
        identity_contents=identity_contents,
        queue_name=run_id,
        extra_vars_yml=extra_vars_yml,
        secure_vars_file=secure_vars_file,
        cache_id=args.cache_id)
    # NOTE(review): cache_id is passed to .format() but the template has no
    # {cache_id} placeholder — presumably vestigial; confirm before removing.

    # Size the root volume explicitly via a block device mapping.
    mapping = BlockDeviceMapping()
    root_vol = BlockDeviceType(size=args.root_vol_size)
    mapping['/dev/sda1'] = root_vol

    ec2_args = {
        'security_group_ids': [security_group_id],
        'subnet_id': subnet_id,
        'key_name': args.keypair,
        'image_id': base_ami,
        'instance_type': args.instance_type,
        'instance_profile_name': args.role_name,
        'user_data': user_data,
        'block_device_map': mapping,
    }

    return ec2_args
instance_states = {} for region in ec2_regions: # Controls which region(s) to model if device.zAWSRegionToModel: if not region.name in device.zAWSRegionToModel: continue region_id = prepId(region.name) region_oms.append(ObjectMap(data={ 'id': region_id, 'title': region.name, })) ec2regionconn = EC2Connection(accesskey, secretkey, region=region) vpcregionconn = VPCConnection(accesskey, secretkey, region=region) sqsconnection = boto.sqs.connect_to_region( region.name, aws_access_key_id=accesskey, aws_secret_access_key=secretkey, validate_certs=False ) # Zones maps['zones'].append( zones_rm( region_id, ec2regionconn.get_all_zones()) ) # VPCs maps['VPCs'].append(
def create_instance_args():
    """
    Looks up security group, subnet
    and returns arguments to pass into
    ec2.run_instances() including
    user data
    """
    # Launch into the first subnet tagged for this stack and play.
    vpc = VPCConnection()
    subnet = vpc.get_all_subnets(
        filters={
            'tag:aws:cloudformation:stack-name': stack_name,
            'tag:play': args.play}
    )
    if len(subnet) < 1:
        sys.stderr.write("ERROR: Expected at least one subnet, got {}\n".format(
            len(subnet)))
        sys.exit(1)
    subnet_id = subnet[0].id
    vpc_id = subnet[0].vpc_id

    security_group_id = get_instance_sec_group(vpc_id)

    # Optional SSH identity to embed for cloning the secure repo.
    if args.identity:
        config_secure = 'true'
        with open(args.identity) as f:
            identity_file = f.read()
    else:
        config_secure = 'false'
        identity_file = "dummy"

    # cloud-init user-data: clone configuration (+ secure repo when enabled),
    # assemble extra vars, run the play locally, then stop all edX services.
    user_data = """#!/bin/bash
set -x
set -e
exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
base_dir="/var/tmp/edx-cfg"
extra_vars="$base_dir/extra-vars-$$.yml"
secure_identity="$base_dir/secure-identity"
git_ssh="$base_dir/git_ssh.sh"
configuration_version="{configuration_version}"
configuration_secure_version="{configuration_secure_version}"
environment="{environment}"
deployment="{deployment}"
play="{play}"
config_secure={config_secure}
git_repo_name="configuration"
git_repo="https://github.com/edx/$git_repo_name"
git_repo_secure="{configuration_secure_repo}"
git_repo_secure_name="{configuration_secure_repo_basename}"
secure_vars_file="$base_dir/$git_repo_secure_name/{secure_vars}"
instance_id=\\
$(curl http://169.254.169.254/latest/meta-data/instance-id 2>/dev/null)
instance_ip=\\
$(curl http://169.254.169.254/latest/meta-data/local-ipv4 2>/dev/null)
instance_type=\\
$(curl http://169.254.169.254/latest/meta-data/instance-type 2>/dev/null)
playbook_dir="$base_dir/configuration/playbooks/edx-east"

if $config_secure; then
    git_cmd="env GIT_SSH=$git_ssh git"
else
    git_cmd="git"
fi

ANSIBLE_ENABLE_SQS=true
SQS_NAME={queue_name}
SQS_REGION=us-east-1
SQS_MSG_PREFIX="[ $instance_id $instance_ip $environment-$deployment $play ]"
PYTHONUNBUFFERED=1

# environment for ansible
export ANSIBLE_ENABLE_SQS SQS_NAME SQS_REGION SQS_MSG_PREFIX PYTHONUNBUFFERED

if [[ ! -x /usr/bin/git || ! -x /usr/bin/pip ]]; then
    echo "Installing pkg dependencies"
    /usr/bin/apt-get update
    /usr/bin/apt-get install -y git python-pip python-apt \\
        git-core build-essential python-dev libxml2-dev \\
        libxslt-dev curl --force-yes
fi

rm -rf $base_dir
mkdir -p $base_dir
cd $base_dir

cat << EOF > $git_ssh
#!/bin/sh
exec /usr/bin/ssh -o StrictHostKeyChecking=no -i "$secure_identity" "\$@"
EOF

chmod 755 $git_ssh

if $config_secure; then
    cat << EOF > $secure_identity
{identity_file}
EOF
fi

cat << EOF >> $extra_vars
---
# extra vars passed into
# abbey.py including versions
# of all the repositories
{extra_vars_yml}
{git_refs_yml}
# The private key used for pulling down
# private edx-platform repos is the same
# identity of the github huser that has
# access to the secure vars repo.
# EDXAPP_USE_GIT_IDENTITY needs to be set
# to true in the extra vars for this
# variable to be used.
EDXAPP_LOCAL_GIT_IDENTITY: $secure_identity

# abbey will always run fake migrations
# this is so that the application can come
# up healthy
fake_migrations: true
EOF

chmod 400 $secure_identity

$git_cmd clone $git_repo $git_repo_name
cd $git_repo_name
$git_cmd checkout $configuration_version
cd $base_dir

if $config_secure; then
    $git_cmd clone $git_repo_secure $git_repo_secure_name
    cd $git_repo_secure_name
    $git_cmd checkout $configuration_secure_version
    cd $base_dir
fi

cd $base_dir/$git_repo_name
sudo pip install -r requirements.txt

cd $playbook_dir

ansible-playbook -vvvv -c local -i "localhost," $play.yml -e@$secure_vars_file -e@$extra_vars
ansible-playbook -vvvv -c local -i "localhost," stop_all_edx_services.yml -e@$secure_vars_file -e@$extra_vars

rm -rf $base_dir

""".format(
        configuration_version=args.configuration_version,
        configuration_secure_version=args.configuration_secure_version,
        configuration_secure_repo=args.configuration_secure_repo,
        configuration_secure_repo_basename=os.path.basename(
            args.configuration_secure_repo),
        environment=args.environment,
        deployment=args.deployment,
        play=args.play,
        config_secure=config_secure,
        identity_file=identity_file,
        queue_name=run_id,
        extra_vars_yml=extra_vars_yml,
        git_refs_yml=git_refs_yml,
        secure_vars=secure_vars)

    ec2_args = {
        'security_group_ids': [security_group_id],
        'subnet_id': subnet_id,
        'key_name': args.keypair,
        'image_id': args.base_ami,
        'instance_type': args.instance_type,
        'instance_profile_name': args.role_name,
        'user_data': user_data,
    }

    return ec2_args
def create_instance_args():
    """
    Looks up security group, subnet
    and returns arguments to pass into
    ec2.run_instances() including
    user data
    """
    # Resolve the security group id from the name supplied on the CLI.
    security_group_id = None

    grp_details = ec2.get_all_security_groups()

    for grp in grp_details:
        if grp.name == args.security_group:
            security_group_id = grp.id
            break
    if not security_group_id:
        print "Unable to lookup id for security group {}".format(
            args.security_group)
        sys.exit(1)

    # Exactly one admin subnet is expected for this stack/application pair.
    vpc = VPCConnection()
    subnet = vpc.get_all_subnets(
        filters={
            'tag:aws:cloudformation:stack-name': stack_name,
            'tag:Application': args.application}
    )
    if len(subnet) != 1:
        sys.stderr.write("ERROR: Expected 1 admin subnet, got {}\n".format(
            len(subnet)))
        sys.exit(1)

    subnet_id = subnet[0].id

    # Optional SSH identity to embed for cloning configuration-secure.
    if args.identity:
        config_secure = 'true'
        with open(args.identity) as f:
            identity_file = f.read()
    else:
        config_secure = 'false'
        identity_file = "dummy"

    # cloud-init user-data: clone configuration (+ secure repo when enabled),
    # then run the play plus the datadog and splunkforwarder plays locally.
    user_data = """#!/bin/bash
set -x
set -e
exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
base_dir="/var/tmp/edx-cfg"
extra_vars="$base_dir/extra-vars-$$.yml"
secure_identity="$base_dir/secure-identity"
git_ssh="$base_dir/git_ssh.sh"
configuration_version="{configuration_version}"
configuration_secure_version="{configuration_secure_version}"
environment="{environment}"
deployment="{deployment}"
play="{play}"
config_secure={config_secure}
secure_vars_file="$base_dir/configuration-secure\\
/ansible/vars/$environment/$environment-$deployment.yml"
instance_id=\\
$(curl http://169.254.169.254/latest/meta-data/instance-id 2>/dev/null)
instance_ip=\\
$(curl http://169.254.169.254/latest/meta-data/local-ipv4 2>/dev/null)
instance_type=\\
$(curl http://169.254.169.254/latest/meta-data/instance-type 2>/dev/null)
playbook_dir="$base_dir/configuration/playbooks/edx-east"
git_repo="https://github.com/edx/configuration"
git_repo_secure="[email protected]:edx/configuration-secure"

if $config_secure; then
    git_cmd="env GIT_SSH=$git_ssh git"
else
    git_cmd="git"
fi

ANSIBLE_ENABLE_SQS=true
SQS_NAME={queue_name}
SQS_REGION=us-east-1
SQS_MSG_PREFIX="[ $instance_id $instance_ip $environment-$deployment $play ]"
PYTHONUNBUFFERED=1

# environment for ansible
export ANSIBLE_ENABLE_SQS SQS_NAME SQS_REGION SQS_MSG_PREFIX PYTHONUNBUFFERED

if [[ ! -x /usr/bin/git || ! -x /usr/bin/pip ]]; then
    echo "Installing pkg dependencies"
    /usr/bin/apt-get update
    /usr/bin/apt-get install -y git python-pip python-apt \\
        git-core build-essential python-dev libxml2-dev \\
        libxslt-dev curl --force-yes
fi

rm -rf $base_dir
mkdir -p $base_dir
cd $base_dir

cat << EOF > $git_ssh
#!/bin/sh
exec /usr/bin/ssh -o StrictHostKeyChecking=no -i "$secure_identity" "\$@"
EOF

chmod 755 $git_ssh

if $config_secure; then
    cat << EOF > $secure_identity
{identity_file}
EOF
fi

cat << EOF >> $extra_vars
{extra_vars_yml}
secure_vars: $secure_vars_file
EOF

chmod 400 $secure_identity

$git_cmd clone -b $configuration_version $git_repo

if $config_secure; then
    $git_cmd clone -b $configuration_secure_version \\
        $git_repo_secure
fi

cd $base_dir/configuration
sudo pip install -r requirements.txt

cd $playbook_dir

ansible-playbook -vvvv -c local -i "localhost," $play.yml -e@$extra_vars
ansible-playbook -vvvv -c local -i "localhost," datadog.yml -e@$extra_vars
ansible-playbook -vvvv -c local -i "localhost," splunkforwarder.yml -e@$extra_vars

rm -rf $base_dir

""".format(
        configuration_version=args.configuration_version,
        configuration_secure_version=args.configuration_secure_version,
        environment=args.environment,
        deployment=args.deployment,
        play=args.play,
        config_secure=config_secure,
        identity_file=identity_file,
        queue_name=run_id,
        extra_vars_yml=extra_vars_yml)

    ec2_args = {
        'security_group_ids': [security_group_id],
        'subnet_id': subnet_id,
        'key_name': args.keypair,
        'image_id': args.base_ami,
        'instance_type': args.instance_type,
        'instance_profile_name': args.role_name,
        'user_data': user_data,
    }

    return ec2_args