def _aws_connection(_region, _access_key, _secret_key, _token, _conn_type):
    """Return a boto connection of the requested type.

    :param _region: AWS region name (ignored for 's3')
    :param _access_key: AWS access key id
    :param _secret_key: AWS secret access key
    :param _token: AWS security (session) token
    :param _conn_type: one of 'ec2', 'autoscale', 'cloudwatch',
        'cloudformation', 's3', 'elb', 'vpc' or 'iam'
    :returns: a boto connection object, or None for 'iam' and unknown types
    """
    # BUG FIX: the body referenced the undefined name ``conn_type`` while the
    # parameter is ``_conn_type`` -- every call raised NameError.
    conn = None
    if _conn_type == 'ec2':
        conn = ec2.connect_to_region(
            _region,
            aws_access_key_id=_access_key,
            aws_secret_access_key=_secret_key,
            security_token=_token)
    elif _conn_type == 'autoscale':
        conn = ec2.autoscale.connect_to_region(
            _region,
            aws_access_key_id=_access_key,
            aws_secret_access_key=_secret_key,
            security_token=_token)
    elif _conn_type == 'cloudwatch':
        conn = ec2.cloudwatch.connect_to_region(
            _region,
            aws_access_key_id=_access_key,
            aws_secret_access_key=_secret_key,
            security_token=_token)
    elif _conn_type == 'cloudformation':
        conn = boto.cloudformation.connect_to_region(
            _region,
            aws_access_key_id=_access_key,
            aws_secret_access_key=_secret_key,
            security_token=_token)
    elif _conn_type == 's3':
        conn = boto.connect_s3(
            # Don't specify region when connecting to S3
            aws_access_key_id=_access_key,
            aws_secret_access_key=_secret_key,
            security_token=_token)
    elif _conn_type == 'elb':
        conn = ec2.elb.connect_to_region(
            _region,
            aws_access_key_id=_access_key,
            aws_secret_access_key=_secret_key,
            security_token=_token)
    elif _conn_type == 'vpc':
        conn = vpc.connect_to_region(
            _region,
            aws_access_key_id=_access_key,
            aws_secret_access_key=_secret_key,
            security_token=_token)
    elif _conn_type == 'iam':
        return None
    if conn:
        # ``validate_certs`` is a module-level flag -- TODO confirm.
        conn.https_validate_certificates = validate_certs
    return conn
def __init__(self, environment, deployment, region, zone, template=template):
    """Build the stack wrapper: open AWS connections, derive the stack
    name, and load + validate the CloudFormation template.

    :param environment: environment label (may be the empty string)
    :param deployment: deployment identifier used in the stack name
    :param region: AWS region for the cfn/sns/vpc connections
    :param zone: availability-zone label used in the stack name
    :param template: template file name (defaults to the module-level
        ``template`` global)
    """
    # Create connections to AWS components
    self.cfn_connection = cfn.connect_to_region(region)
    self.sns_connection = sns.connect_to_region(region)
    self.vpc_connection = vpc.connect_to_region(region)
    # IAM is a global service; boto2 addresses it via the "universal" region.
    self.iam_connection = iam.connect_to_region("universal")
    # Temporary python class -> directory name hack
    self.lab_dir = self.__class__.__name__.lower()
    self.stack_name = "-".join(
        [self.lab_dir, environment, deployment, region, zone])
    if environment != '':
        # Per-environment SNS topic used for stack event notifications.
        self.notification_arns = self.get_sns_topic(
            "cloudformation-notifications-" + environment)
    self.parameters = []
    # Prepare the CFN template
    # Path layout: <this file's dir>/<lab_dir>/<vpc_provider>/<template>
    # (``vpc_provider`` and ``max_template_size`` are module globals -- TODO confirm)
    self.template_url = "/".join([
        os.path.dirname(os.path.realpath(__file__)),
        self.lab_dir,
        vpc_provider,
        template
    ])
    self.template_body = self.read_file(self.template_url, max_template_size)
    self.validate_template()
def SubnetList(VpcConn, Regions, VpcId='', Azone=''):
    """Return a list of AWSsubnet objects across *Regions*.

    :param VpcConn: unused here (kept for interface compatibility)
    :param Regions: list of region names to scan
    :param VpcId: optional VPC id filter
    :param Azone: optional availability-zone filter
    :raises TypeError: if Regions is not a list
    """
    # FIX: isinstance() instead of ``type(...) is list`` -- idiomatic and
    # accepts list subclasses.
    if not isinstance(Regions, list):
        raise TypeError('Region needs to be a list')
    vpcFilter = [('vpcId', VpcId)] if VpcId else []
    retlist = []
    for Reg in Regions:
        _regConn = vpc.connect_to_region(Reg)
        if vpcFilter:
            get_allsubs = _regConn.get_all_subnets(filters=vpcFilter)
        else:
            get_allsubs = _regConn.get_all_subnets()
        for _sub in get_allsubs:
            # Keep every subnet, or only those in the requested AZ.
            # NOTE(review): nesting reconstructed from flattened source --
            # the else is assumed to pair with ``if Azone``; confirm.
            if not Azone or _sub.availability_zone == Azone:
                retlist.append(AWSsubnet(_sub))
    return retlist
def create(region, name=None, process=None, platform=None, env=None,
           raw_mode=False, fullname=None):
    """Create a security group inside the VPC tagged Name=main in *region*,
    tag it with its own name, and return the boto SecurityGroup.

    :raises ValueError: if no VPC tagged Name=main exists in the region
    """
    vpc_connection = vpc.connect_to_region(region)
    ec2_connection = ec2.connect_to_region(region)
    # Everything is created inside the VPC tagged Name=main.
    main_vpcs = vpc_connection.get_all_vpcs(filters={'tag:Name': 'main'})
    if len(main_vpcs) > 0:
        main_vpc = main_vpcs[0]
    else:
        raise ValueError(
            "Not found any VPC into region %s with tag Name: main" % (region))
    vpc_id = main_vpc.id
    # NOTE(review): if/else nesting reconstructed from flattened source.
    # As written, when raw_mode is False ``securitygroup_name`` is never
    # assigned and the create_security_group call below raises NameError --
    # confirm the intended pairing against the original file.
    if raw_mode:
        if fullname is not None:
            securitygroup_name = fullname
            securitygroup_desc = securitygroup_name
        else:
            # Conventional name: <process>-<name>-<platform>-<env>-sg
            securitygroup_name = "%s-%s-%s-%s-sg" % (process, name, platform, env)
            securitygroup_desc = securitygroup_name
    securitygroup = ec2_connection.create_security_group(securitygroup_name,
                                                         securitygroup_desc,
                                                         vpc_id=vpc_id)
    securitygroup.add_tag("Name", securitygroup_name)
    return securitygroup
def create(region, name=None, process=None, platform=None, env=None,
           raw_mode=False, fullname=None):
    """Create and tag a security group in the 'main' VPC of *region*.

    Duplicate of the sibling ``create`` above -- kept in sync.

    :raises ValueError: if no VPC tagged Name=main exists in the region
    """
    vpc_connection = vpc.connect_to_region(region)
    ec2_connection = ec2.connect_to_region(region)
    main_vpcs = vpc_connection.get_all_vpcs(filters={'tag:Name': 'main'})
    if len(main_vpcs) > 0:
        main_vpc = main_vpcs[0]
    else:
        raise ValueError("Not found any VPC into region %s with tag Name: main" % (region))
    vpc_id = main_vpc.id
    # NOTE(review): nesting reconstructed from flattened source; with
    # raw_mode False no name is assigned and the call below would raise
    # NameError -- confirm against the original file.
    if raw_mode:
        if fullname is not None:
            securitygroup_name = fullname
            securitygroup_desc = securitygroup_name
        else:
            securitygroup_name = "%s-%s-%s-%s-sg" % (process, name, platform, env)
            securitygroup_desc = securitygroup_name
    securitygroup = ec2_connection.create_security_group(securitygroup_name,
                                                         securitygroup_desc,
                                                         vpc_id=vpc_id)
    securitygroup.add_tag("Name", securitygroup_name)
    return securitygroup
def connect(self, region=None):
    """Authenticate and provision the VPC networking for an instance.

    Creates a 10.10.0.0/16 VPC with DNS support and hostnames enabled,
    attaches an internet gateway, builds a routed public subnet
    (10.10.10.0/24), then creates the security group and key pair and
    resolves the newest matching image.

    :param region: region to connect to (optional, defaults to eu-west1)
    """
    self.vpc_conn = vpc.connect_to_region(
        region or self.region,
        aws_access_key_id=self.keyid,
        aws_secret_access_key=self.secret)
    # VPC with DNS support + hostnames switched on.
    self.vpc_zone = self.vpc_conn.create_vpc('10.10.0.0/16')
    self.vpc_conn.modify_vpc_attribute(self.vpc_zone.id, enable_dns_support=True)
    self.vpc_conn.modify_vpc_attribute(self.vpc_zone.id, enable_dns_hostnames=True)
    # Internet gateway plus a default route makes the subnet public.
    igw = self.vpc_conn.create_internet_gateway()
    self.vpc_conn.attach_internet_gateway(igw.id, self.vpc_zone.id)
    rtable = self.vpc_conn.create_route_table(self.vpc_zone.id)
    self.subnet = self.vpc_conn.create_subnet(
        self.vpc_zone.id, '10.10.10.0/24', availability_zone=self.zone)
    self.vpc_conn.associate_route_table(rtable.id, self.subnet.id)
    self.vpc_conn.create_route(rtable.id, '0.0.0.0/0', igw.id)
    self.create_security_group(self.vpc_conn, vpc_id=self.vpc_zone.id)
    self.create_key_pair(self.vpc_conn)
    self.latestimage = self.newest_image(self.vpc_conn, os_type=self.imageid)
def get_subnet_id(self, subnet_name, vpc_id):
    """Return the id of the first subnet tagged Name=*subnet_name*
    inside *vpc_id* (IndexError when none match)."""
    connection = vpc.connect_to_region(
        self.region,
        aws_access_key_id=self.access_key,
        aws_secret_access_key=self.secret_key)
    matches = connection.get_all_subnets(
        filters={'tag:Name': subnet_name, 'vpc-id': vpc_id})
    return matches[0].id
def launch_instance(args, userdata=None):
    """Connect to AWS and launch instance using args from create_parser.

    :param args: parsed arguments (region, bootami, novpc, subnetid,
        secgroup, disksize, key, type, mirrorurl, ksurl)
    :param userdata: cloud-init userdata; generated from args when None
    :returns: the launched boto Instance
    :raises Error: when no AMI matches args.bootami
    """
    # Make userdata unless given
    userdata = userdata or create_userdata(args.mirrorurl, args.ksurl)
    # Connect to EC2 endpoint for region
    conn = connect_to_region(args.region)
    # Choose first image ID that matches the given AMI name pattern.
    # FIX: renamed local from ``id`` to avoid shadowing the builtin.
    try:
        image_id = conn.get_all_images(filters={'name': args.bootami})[0].id
    except IndexError:
        raise Error('ERROR: No matching AMIs found!')
    # Connect to the given SubnetID or get a list of subnets in this region
    if args.novpc:
        subnets = None
    else:
        c = vpc.connect_to_region(args.region)
        subnets = c.get_all_subnets(args.subnetid)
    # Use a VPC if we can, unless told not to. Use first subnet in list.
    if subnets:
        grpfilt = {'group-name': args.secgroup, 'vpc_id': subnets[0].vpc_id}
        subnetid = subnets[0].id
        # Find the security group id from the name
        group = conn.get_all_security_groups(filters=grpfilt)[0].id
        # associate the instance with a VPC and give it a public IP address
        interface = networkinterface.NetworkInterfaceSpecification(
            subnet_id=subnetid, groups=[group],
            associate_public_ip_address=True)
        interfaces = networkinterface.NetworkInterfaceCollection(interface)
        groups = None
    else:
        interfaces = None
        groups = [args.secgroup]
    # Set disk mapping if needed
    if args.disksize:
        dev_xvda = blockdevicemapping.BlockDeviceType(
            delete_on_termination=True)
        dev_xvda.size = args.disksize
        device_map = blockdevicemapping.BlockDeviceMapping()
        device_map['/dev/xvda'] = dev_xvda
    else:
        device_map = None
    # launch instance
    res = conn.run_instances(image_id,
                             key_name=args.key,
                             instance_type=args.type,
                             network_interfaces=interfaces,
                             user_data=userdata,
                             security_groups=groups,
                             block_device_map=device_map)
    return res.instances[0]
def main(argv):
    """Create/update per-VPC security groups holding Stackdriver endpoint IPs.

    Parses CLI arguments, ensures every VPC in the region has the named
    security group, then fetches the current Stackdriver IP list.
    """
    parser = argparse.ArgumentParser(
        description=("Create/Update a security group with Stackdriver IPs "
                     "used for endpoint monitoring."),
        fromfile_prefix_chars="@")
    parser.add_argument("-s", "--security_group",
                        default="stackdriver-endpoint-monitoring",
                        help="Name of the security group to create/update.")
    parser.add_argument("--description",
                        default="Security group containing Stackdriver endpoint IPs.",
                        help="Description for the security group.")
    parser.add_argument("-r", "--region", default="us-east-1",
                        help="The AWS region to use.")
    parser.add_argument("-k", "--stackdriver_api_key", required=True,
                        help="API Key to use with Stackdriver API")
    parser.add_argument("-v", "--verbose", action="count")
    args = parser.parse_args()
    if args.verbose:
        # Each -v lowers the threshold by one level (10).
        logger.setLevel(logging.FATAL - (args.verbose * 10))
    vpc_conn = vpc.connect_to_region(args.region)
    if vpc_conn is None:
        logger.fatal("Invalid AWS Region: %r", args.region)
        sys.exit(1)
    all_security_groups = vpc_conn.get_all_security_groups()
    all_vpcs = vpc_conn.get_all_vpcs()
    # FIX: loop variable renamed from ``vpc`` -- it shadowed the boto ``vpc``
    # module used above.
    vpc_set = set(v.id for v in all_vpcs)
    logger.info("VPCs: %r", vpc_set)
    # Existing security groups with the given name
    existing_security_groups = [
        sg for sg in all_security_groups if sg.name == args.security_group
    ]
    logger.info("Existing Security Groups: %r", existing_security_groups)
    # VPCs without the given security group
    nvpcs = vpc_set - set(sg.vpc_id for sg in existing_security_groups)
    logger.info("VPCs that need the new group: %r", nvpcs)
    for v in nvpcs:
        logger.info("Creating new security group: name=%r, vpc_id=%r",
                    args.security_group, v)
        existing_security_groups.append(
            vpc_conn.create_security_group(args.security_group,
                                           args.description,
                                           vpc_id=v))
    logger.info("Security groups to modify: %r", existing_security_groups)
    # Grab Stackdriver IPs from Stackdriver API
    url = "https://api.stackdriver.com/v0.2/endpoints/ips/?apikey=%s" % args.stackdriver_api_key
    try:
        result = urllib2.urlopen(url)
        result_body = result.readlines()
        import json
        # NOTE(review): parsed_json is unused in the visible chunk.
        parsed_json = json.loads(result_body[0])
    # FIX: ``except E as e`` instead of the Python-2-only comma form.
    except urllib2.URLError as e:
        logger.error(e)
def init_region(r):
    """Connect to region *r* and return a (VPCConnection, EC2Connection) tuple."""
    # The VPC connection is opened first, then EC2 -- same order as before.
    return (vpc.connect_to_region(r), ec2.connect_to_region(r))
def init_region( r ):
    """Open connections to region *r*.

    :returns: tuple of (VPCConnection, EC2Connection)
    """
    # connect to region (VPC first, then EC2)
    vpc_conn = vpc.connect_to_region( r )
    ec2_conn = ec2.connect_to_region( r )
    return ( vpc_conn, ec2_conn )
def __get_boto_conn(self, region):
    """Return a per-region boto VPC connection, creating and caching it
    in ``self.boto_conns`` on first use."""
    if region not in self.boto_conns:
        access = self.settings.getRegionalSetting(region, 'AWS_ACCESS_KEY')
        secret = self.settings.getRegionalSetting(region, 'AWS_SECRET_KEY')
        self.boto_conns[region] = vpc.connect_to_region(
            region,
            aws_access_key_id=access,
            aws_secret_access_key=secret)
    return self.boto_conns[region]
def _vpc_connection(self):
    """Open and return a boto VPC connection for the configured region.

    Uses the instance's access/secret keys and ``settings.REGION``.

    :returns: an authenticated boto VPC connection
    """
    # FIX: local renamed so it no longer shadows this method's name.
    conn = vpc.connect_to_region(
        aws_access_key_id=self.access_key,
        aws_secret_access_key=self.secret_key,
        region_name=settings.REGION
    )
    # FIX: debug message grammar ("was been succesfully").
    self.logging.debug(
        "The connection with vpc has been established successfully"
    )
    return conn
def setup_networking_deprecated():
    """Create the legacy test network: one 10.11.0.0/16 VPC with a
    subnet in us-east-1a and one in us-east-1b.

    :returns: dict with the ids under "vpc", "subnet1" and "subnet2"
    """
    connection = boto_vpc.connect_to_region("us-east-1")
    new_vpc = connection.create_vpc("10.11.0.0/16")
    first = connection.create_subnet(
        new_vpc.id, "10.11.1.0/24", availability_zone="us-east-1a")
    second = connection.create_subnet(
        new_vpc.id, "10.11.2.0/24", availability_zone="us-east-1b")
    return {
        "vpc": new_vpc.id,
        "subnet1": first.id,
        "subnet2": second.id,
    }
def setup_networking_deprecated():
    """Create the legacy 10.11.0.0/16 test VPC with subnets in two AZs
    and return their ids keyed by role."""
    connection = boto_vpc.connect_to_region('us-east-1')
    test_vpc = connection.create_vpc("10.11.0.0/16")
    az_subnets = []
    # Subnet 1 lands in us-east-1a, subnet 2 in us-east-1b.
    for cidr, az in (("10.11.1.0/24", 'us-east-1a'),
                     ("10.11.2.0/24", 'us-east-1b')):
        az_subnets.append(connection.create_subnet(test_vpc.id, cidr,
                                                   availability_zone=az))
    return {'vpc': test_vpc.id,
            'subnet1': az_subnets[0].id,
            'subnet2': az_subnets[1].id}
def setup_networking_deprecated():
    """Provision the deprecated test network layout.

    One VPC (10.11.0.0/16) plus one subnet per AZ (us-east-1a and
    us-east-1b); returns the created ids keyed by role.
    """
    conn = boto_vpc.connect_to_region('us-east-1')
    created = conn.create_vpc("10.11.0.0/16")
    ids = {'vpc': created.id}
    ids['subnet1'] = conn.create_subnet(
        created.id, "10.11.1.0/24", availability_zone='us-east-1a').id
    ids['subnet2'] = conn.create_subnet(
        created.id, "10.11.2.0/24", availability_zone='us-east-1b').id
    return ids
def _connection(self, region=None):
    """Hand back a per-region connection from the pool, opening a new
    boto VPC connection on first use.

    :param region: region name; defaults to 'us-east-1'
    """
    wanted = region or 'us-east-1'
    if wanted not in self._conns:
        # First request for this region -- open and cache the connection.
        self._conns[wanted] = vpc.connect_to_region(
            wanted,
            aws_access_key_id=self.aws_access_key_id,
            aws_secret_access_key=self.aws_secret_access_key,
        )
    return self._conns[wanted]
def get(platform, region, zone=None):
    """Look up subnets tagged with *platform* in *region*.

    Returns the first subnet id matching platform+zone when *zone* is
    given and found, otherwise the list of all platform subnet ids, or
    None when nothing matches.
    """
    conn = vpc.connect_to_region(region)
    if zone is not None:
        zoned = [s.id for s in conn.get_all_subnets(
            filters={'tag:Platform': platform, 'tag:Zone': zone})]
        if zoned:
            return zoned[0]
    all_ids = [s.id for s in conn.get_all_subnets(
        filters={'tag:Platform': platform})]
    # An empty list degrades to None, exactly like the original.
    return all_ids or None
def main(access_key, secret_key, path, region, instance, cidr, protocol, icmp_type, icmp_code, start_port, end_port, vpc_id, rule_number): """ Parse inputs and apply rule to black list all traffic from <cidr>. """ conn = None if access_key is None or secret_key is None: conn = vpc.connect_to_region(region) else: conn = vpc.connect_to_region(region=region, **{'aws_access_key_id' : access_key, 'aws_secret_access_key' : secret_key}) if conn is None: print "Unable to connect to region with credentials." raise SystemExit(1) rule['rule_number'] = rule_number rule['protocol'] = protocol rule['cidr_block'] = cidr rule['icmp_type'] = icmp_type rule['icmp_code'] = icmp_code rule['port_range_from'] = start_port rule['port_range_to'] = end_port if protocol == 'List': print "VPC List:\n=========" vpcs = conn.get_all_vpcs() for i in vpcs: try: print "Name: {0}; Tags: {1}; Subnet: {2}".format(i, i.tags['Name'], i.cidr_block) except: print "Name: {0}; Tags: No Tags Available; Subnet: {1}".format(i, i.cidr_block) conn.close() raise SystemExit(0)
def _aws_connection(_region, _access_key, _secret_key, _token, _conn_type):
    """Return a boto connection for *_conn_type*.

    :param _region: AWS region (ignored for 's3')
    :param _access_key: AWS access key id
    :param _secret_key: AWS secret access key
    :param _token: AWS security token
    :param _conn_type: service type; 'iam' and unknown types yield None
    """
    # FIX: the body previously used the undefined name ``conn_type``; the
    # parameter is ``_conn_type``, so every call raised NameError.
    creds = dict(aws_access_key_id=_access_key,
                 aws_secret_access_key=_secret_key,
                 security_token=_token)
    conn = None
    if _conn_type == 'ec2':
        conn = ec2.connect_to_region(_region, **creds)
    elif _conn_type == 'autoscale':
        conn = ec2.autoscale.connect_to_region(_region, **creds)
    elif _conn_type == 'cloudwatch':
        conn = ec2.cloudwatch.connect_to_region(_region, **creds)
    elif _conn_type == 'cloudformation':
        conn = boto.cloudformation.connect_to_region(_region, **creds)
    elif _conn_type == 's3':
        # Don't specify region when connecting to S3
        conn = boto.connect_s3(**creds)
    elif _conn_type == 'elb':
        conn = ec2.elb.connect_to_region(_region, **creds)
    elif _conn_type == 'vpc':
        conn = vpc.connect_to_region(_region, **creds)
    elif _conn_type == 'iam':
        return None
    if conn:
        # ``validate_certs`` is a module-level flag -- TODO confirm.
        conn.https_validate_certificates = validate_certs
    return conn
def aws_connection(region, access_key, secret_key, token, conn_type,
                   validate_certs=False):
    """Build and return a boto connection for *conn_type*.

    :type region: string
    :param region: region name (e.g. 'us-east-1'); ignored for 's3'
    :type access_key: string
    :param access_key: AWS access key
    :type secret_key: string
    :param secret_key: AWS secret key
    :type token: string
    :param token: AWS security token
    :type conn_type: string
    :param conn_type: 'ec2', 'autoscale', 'cloudwatch', 'cloudformation',
        's3', 'elb' or 'vpc'; 'iam' (and anything unrecognised) yields None
    :type validate_certs: bool
    :param validate_certs: indicates to check the ssl cert the server provides
    """
    if conn_type == 'iam':
        return None
    credentials = {
        'aws_access_key_id': access_key,
        'aws_secret_access_key': secret_key,
        'security_token': token,
    }
    conn = None
    if conn_type == 'ec2':
        conn = ec2.connect_to_region(region, **credentials)
    elif conn_type == 'autoscale':
        conn = ec2.autoscale.connect_to_region(region, **credentials)
    elif conn_type == 'cloudwatch':
        conn = ec2.cloudwatch.connect_to_region(region, **credentials)
    elif conn_type == 'cloudformation':
        conn = boto.cloudformation.connect_to_region(region, **credentials)
    elif conn_type == 's3':
        # Don't specify region when connecting to S3
        conn = boto.connect_s3(**credentials)
    elif conn_type == 'elb':
        conn = ec2.elb.connect_to_region(region, **credentials)
    elif conn_type == 'vpc':
        conn = vpc.connect_to_region(region, **credentials)
    if conn:
        conn.https_validate_certificates = validate_certs
    return conn
def __init__(self, environment, deployment, region, zone,
             aws_access_key_id, aws_secret_access_key):
    """Open the AWS connections, derive the stack name, and load and
    validate the CloudFormation template for this lab."""
    creds = {
        'aws_access_key_id': aws_access_key_id,
        'aws_secret_access_key': aws_secret_access_key,
    }
    # Create connections to AWS components
    self.cfn_connection = cfn.connect_to_region(region, **creds)
    self.sns_connection = sns.connect_to_region(region, **creds)
    self.vpc_connection = vpc.connect_to_region(region, **creds)
    # Temporary python class -> directory name hack
    lab_dir = self.__class__.__name__.lower()
    self.stack_name = "-".join([lab_dir, environment, deployment, region, zone])
    self.notification_arns = self.get_sns_topic(
        "cloudformation-notifications-" + environment)
    self.parameters = []
    # Prepare the CFN template.
    # (``vpc_provider``, ``template`` and ``max_template_size`` are module
    # globals -- TODO confirm)
    self.template_url = "/".join([os.path.dirname(os.path.realpath(__file__)),
                                  lab_dir, vpc_provider, template])
    self.template_body = self.read_file(self.template_url, max_template_size)
    self.validate_template()
def vpc_connect(self, region=None):
    """Authenticate to AWS and build the instance networking.

    Creates a 10.10.0.0/16 VPC with DNS enabled, attaches an internet
    gateway, wires up a routed 10.10.10.0/24 subnet, and then creates
    the security group and key pair.

    :param region: region to connect to (optional, defaults to eu-west1)
    """
    self.vpc_conn = vpc.connect_to_region(
        region or self.region,
        aws_access_key_id=self.keyid,
        aws_secret_access_key=self.secret)
    self.vpc_zone = self.vpc_conn.create_vpc('10.10.0.0/16')
    # Switch on DNS support, then DNS hostnames (same order as before).
    for attr in ({'enable_dns_support': True}, {'enable_dns_hostnames': True}):
        self.vpc_conn.modify_vpc_attribute(self.vpc_zone.id, **attr)
    inet_gw = self.vpc_conn.create_internet_gateway()
    self.vpc_conn.attach_internet_gateway(inet_gw.id, self.vpc_zone.id)
    pub_rt = self.vpc_conn.create_route_table(self.vpc_zone.id)
    self.subnet = self.vpc_conn.create_subnet(self.vpc_zone.id,
                                              '10.10.10.0/24',
                                              availability_zone=self.zone)
    self.vpc_conn.associate_route_table(pub_rt.id, self.subnet.id)
    self.vpc_conn.create_route(pub_rt.id, '0.0.0.0/0', inet_gw.id)
    self.create_security_group(self.vpc_conn, vpc_id=self.vpc_zone.id)
    self.create_key_pair(self.vpc_conn)
def EbsList(VpcConn, Regions, instance=''):
    """Return a list of AWSebs objects, optionally filtered by instance.

    :param VpcConn: unused here (kept for interface compatibility)
    :param Regions: list of region names to scan
    :param instance: optional 'i-...' instance id; filters on attachment
    :raises TypeError: if Regions is not a list
    """
    # FIX: isinstance() instead of ``type(...) is list``.
    if not isinstance(Regions, list):
        raise TypeError('Region needs to be a list')
    if instance and instance.startswith('i-'):
        ebs_filter = {'attachment.instance-id': instance}
    else:
        ebs_filter = {}
    retlist = []
    for Reg in Regions:
        _regConn = vpc.connect_to_region(Reg)
        for _vol in _regConn.get_all_volumes(filters=ebs_filter):
            retlist.append(AWSebs(_vol))
    return retlist
def __init__(self, module, env, cluster, zone, template_name='template.json', **optionals):
    """Prepare a minion stack: normalise names, build tags, and open the
    CloudFormation / VPC / IAM connections.

    :param module: module name (snake-cased with dashes for the stack name)
    :param env: environment label (lower-cased)
    :param cluster: cluster name; the default cluster is omitted from names
    :param zone: availability-zone label used in the stack name
    :param template_name: CFN template file name
    :param optionals: must contain 'debug' and 'dry_run' flags
    """
    # debug and/or dry-run mode
    self.debug = optionals['debug']
    self.dry_run = optionals['dry_run']
    self.enable_debug()
    self.env = env.strip().lower()
    self.cluster = snake_case_with_dashes(cluster.strip())
    self.zone = zone
    self.template_name = template_name
    # for stack names & resources names in template, use only non-default names
    self.non_default_cluster = self.cluster if self.cluster not in [default_cluster_name] else None
    # stack name -- empty/None components are dropped by filter(None, ...)
    self.module_name = snake_case_with_dashes(module.strip())
    self.stack_name = "-".join(filter(None, [self.module_name, self.env, self.non_default_cluster, self.zone]))
    # params
    self.parameters = []
    # stack tags, namespaced under the organisation name
    self.stack_tags = {
        '{0}:minion:env'.format(org_name): self.env,
        '{0}:minion:cluster'.format(org_name): self.cluster,
        '{0}:minion:module'.format(org_name): self.module_name
    }
    # Create connections to AWS components
    # (credentials come from the MINION_* environment variables)
    aws_access_key_id = get_env_variable('MINION_ACCESS_KEY_ID')
    aws_secret_access_key = get_env_variable('MINION_SECRET_ACCESS_KEY')
    self.cfn_connection = cfn.connect_to_region(aws_region, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
    self.vpc_connection = vpc.connect_to_region(aws_region, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
    # IAM is global; boto2 uses the pseudo-region "universal".
    self.iam_connection = iam.connect_to_region("universal", aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
def get(platform, region, zone=None):
    """Return a subnet id for *platform*/*zone* when one exists, else the
    full list of *platform* subnet ids, else None."""
    conn = vpc.connect_to_region(region)
    if zone is not None:
        zoned = conn.get_all_subnets(
            filters={'tag:Platform': platform, 'tag:Zone': zone})
        # First match wins, mirroring the original subnet[0].
        for candidate in zoned:
            return candidate.id
    platform_ids = [
        s.id
        for s in conn.get_all_subnets(filters={'tag:Platform': platform})
    ]
    if platform_ids:
        return platform_ids
    return None
def main():
    """Print a formatted table (one line per VPC) for every configured
    region. ``HEADER``, ``Regions`` and ``VPC_LINE`` are module globals
    -- TODO confirm."""
    print HEADER
    for Region in Regions:
        awsRegionConn = vpc.connect_to_region(Region)
        for _vpc in awsRegionConn.get_all_vpcs():
            printdict = {}
            v_desc = ''
            v_name = ''
            # Name/Description come from optional EC2 tags.
            if 'Name' in _vpc.tags.keys():
                v_name = _vpc.tags['Name']
            if 'Description' in _vpc.tags.keys():
                v_desc = _vpc.tags['Description']
            # Fixed-width fields for column alignment.
            printdict['vpcid'] = _vpc.id
            printdict['cidr'] = "%-16s" % _vpc.cidr_block
            printdict['region'] = "%-11s" % _vpc.region.name
            printdict['name'] = "%-18s" % v_name
            printdict['description'] = v_desc
            print VPC_LINE.substitute(printdict)
def SecurityGroupList(VpcConn, Regions, VpcId=''):
    """Return a list of AWSsecuritygroup objects (optional VpcId filter).

    :param VpcConn: unused here (kept for interface compatibility)
    :param Regions: list of region names to scan
    :param VpcId: optional VPC id filter
    :raises TypeError: if Regions is not a list
    """
    # FIX: isinstance() instead of ``type(...) is list``.
    if not isinstance(Regions, list):
        raise TypeError('Region needs to be a list')
    vpcFilter = {'vpc_id': VpcId} if VpcId else {}
    retlist = []
    for Reg in Regions:
        _regConn = vpc.connect_to_region(Reg)
        if vpcFilter:
            get_allsgs = _regConn.get_all_security_groups(filters=vpcFilter)
        else:
            get_allsgs = _regConn.get_all_security_groups()
        for _sgs in get_allsgs:
            retlist.append(AWSsecuritygroup(_sgs))
    return retlist
def _initialise_aws_connections():
    """Open every AWS connection this script needs and return them keyed
    by service name, with the region itself under 'region'."""
    _log.info('Initialising AWS Connections')
    _validate_environment()
    # Even though the Boto lib can use the environment variables we'll import one
    # for easier re-use in this script
    _log.info('Loading credentials from Environment')
    aws_region = os.getenv('AWS_DEFAULT_REGION')
    _log.info('Initializing Boto resources')
    # Connection order is unchanged: vpc, ec2, elb, iam, route53, cloudwatch.
    return {
        'vpc': vpc.connect_to_region(aws_region),
        'ec2': ec2.connect_to_region(aws_region),
        'elb': elb.connect_to_region(aws_region),
        'iam': iam.connect_to_region(aws_region),
        'route53': Route53Connection(),
        'cloudwatch': cloudwatch.connect_to_region(aws_region),
        'region': aws_region,
    }
def InstanceList(VpcConn, Regions, VpcId=''):
    """Return a list of AWSinstance objects (optional VpcId filter).

    :param VpcConn: unused here (kept for interface compatibility)
    :param Regions: list of region names to scan
    :param VpcId: optional VPC id filter
    :raises TypeError: if Regions is not a list
    """
    # FIX: isinstance() instead of ``type(...) is list``.
    if not isinstance(Regions, list):
        raise TypeError('Region needs to be a list')
    vpcFilter = {'vpc_id': VpcId} if VpcId else {}
    retlist = []
    for Reg in Regions:
        _regConn = vpc.connect_to_region(Reg)
        if vpcFilter:
            get_all = _regConn.get_all_instances(filters=vpcFilter)
        else:
            get_all = _regConn.get_all_instances()
        ## add filters here ##
        for _item in get_all:
            # get_all_instances yields reservations; unwrap the instance.
            retlist.append(AWSinstance(_item.instances[0]))
    return retlist
instance_type =INS_TYPE, key_name =INS_KEY_NAME, security_group_ids=INS_SECGROUPS, user_data =USER_DATA_SERVERNAME, subnet_id =INS_SUBNET.id) time.sleep(3) instance=reservation.instances[0] instance.add_tag("Project", INS_PROJECT) instance.add_tag("Name", SERVER_NAME) created_instances.append(instance) return created_instances #CREATING VPC print("Connecting to AWS") vpc_con = vpc.connect_to_region("ap-southeast-1") print("Creating VPC") my_vpc = vpc_con.create_vpc('10.0.0.0/16') vpc_con.modify_vpc_attribute(my_vpc.id, enable_dns_support=True) vpc_con.modify_vpc_attribute(my_vpc.id, enable_dns_hostnames=True) print("Tagging VPC") my_vpc.add_tag("Name",PROJECT+"-VPC") my_vpc.add_tag("Project",PROJECT) print("Creating subnets") subnetdmz = vpc_con.create_subnet(my_vpc.id,'10.0.1.0/24') subnetbe = vpc_con.create_subnet(my_vpc.id,'10.0.2.0/24') print("Tagging subnet") subnetdmz.add_tag("Name",PROJECT+"-Subnet") subnetdmz.add_tag("Project",PROJECT) subnetbe.add_tag("Name",PROJECT+"-Subnet") subnetbe.add_tag("Project",PROJECT)
def aws_connection(region, access_key, secret_key, token, conn_type,
                   validate_certs=False):
    """Return a boto connection object for the requested service type.

    :type region: string
    :param region: region name (e.g. 'us-east-1'); not used for 's3'
    :type access_key: string
    :param access_key: AWS access key
    :type secret_key: string
    :param secret_key: AWS secret key
    :type token: string
    :param token: AWS security token
    :type conn_type: string
    :param conn_type: connection type ('ec2', 'autoscale', 'cloudwatch',
        'cloudformation', 'elb', 's3' or 'vpc'); 'iam' and unknown
        types return None
    :type validate_certs: bool
    :param validate_certs: indicates to check the ssl cert the server provides
    """
    if conn_type == 'iam':
        return None
    # Resolve the factory for the requested service; the attribute lookup
    # happens only for the matching branch, as before.
    factory = None
    needs_region = True
    if conn_type == 'ec2':
        factory = ec2.connect_to_region
    elif conn_type == 'autoscale':
        factory = ec2.autoscale.connect_to_region
    elif conn_type == 'cloudwatch':
        factory = ec2.cloudwatch.connect_to_region
    elif conn_type == 'cloudformation':
        factory = boto.cloudformation.connect_to_region
    elif conn_type == 's3':
        # Don't specify region when connecting to S3
        factory = boto.connect_s3
        needs_region = False
    elif conn_type == 'elb':
        factory = ec2.elb.connect_to_region
    elif conn_type == 'vpc':
        factory = vpc.connect_to_region
    if factory is None:
        return None
    if needs_region:
        conn = factory(region,
                       aws_access_key_id=access_key,
                       aws_secret_access_key=secret_key,
                       security_token=token)
    else:
        conn = factory(aws_access_key_id=access_key,
                       aws_secret_access_key=secret_key,
                       security_token=token)
    if conn:
        conn.https_validate_certificates = validate_certs
    return conn
def create_instance(self):
    """ Method to create the instance, eip, ebs resources

    Allocates an EIP first (when self.eip is set) so any failure happens
    before the instance exists, launches the instance, tags it, waits up
    to ~3 minutes for the 'running' state, then records the private IP
    and associates the EIP. Python 2 code (print statements).
    """
    # Initial boto handshake; region connection follows.
    if self.ACCESS and self.SECRET:
        boto.connect_vpc(self.ACCESS, self.SECRET)
    else:
        boto.connect_vpc()
    awsRegConn = BotoVPC.connect_to_region(self.region)
    ## Allocate new EIP if True - do this first to error out before Instance gets created ##
    if self.eip:
        _VpcEip = awsRegConn.allocate_address(domain='vpc')
        self.pub_ip = _VpcEip.public_ip
        self.eip_alloc_id = _VpcEip.allocation_id
    # Launch the instance #
    print "Launching instance: %s / %s / %s" % (self.hostname, self.type, self.region),
    sys.stdout.flush()
    AgReservation = awsRegConn.run_instances(
        image_id=self.ami_id,
        key_name=self.rootkey,
        subnet_id=self.subnet,
        security_group_ids=self.groups,
        private_ip_address=self.priv_ip,
        disable_api_termination=True,
        block_device_map=self.bdm,
        instance_type=self.type,
    )
    # wait a few secs before pulling the Instance object #
    sleep(5)
    _AgInst = AgReservation.instances[0]
    self.instance_id = _AgInst.id
    self.avail_zone = _AgInst.placement
    print " ... done."
    print "Adding Name Tag.. ",
    sys.stdout.flush()
    # Create Tags here #
    _AgInst.add_tag("Name", self.nametag)
    print " ... done."
    print "Waiting for running state:",
    sys.stdout.flush()
    # Poll every 3s; bail out after ~60 polls (about 3 minutes).
    _count = 0
    while _AgInst.update() != 'running':
        sys.stdout.write('.')
        sys.stdout.flush()
        sleep(3)
        if _count >= 60:
            print "\n[Error] 3min timeout reached waiting for 'running' state."
            print "_current_state_ = %s" % _AgInst.update()
            sys.exit(2)
        _count += 1
    print "[running]"
    # Need to wait a few more secs for the private IP to get assigned #
    sleep(5)
    _AgInst.update()
    self.priv_ip = _AgInst.private_ip_address
    # Assign EIP if True #
    if self.eip:
        print "Assigning EIP: %s" % (self.pub_ip)
        awsRegConn.associate_address(self.instance_id, None, self.eip_alloc_id)
    return
#!/usr/bin/env python from boto import vpc import time REGION = 'eu-central-1' PROJECT = 'Hiera-demo' vpc_con = vpc.connect_to_region(REGION) print("Terminating all instances and their elastic ips") instances = vpc_con.get_only_instances(filters=({"tag:Project": PROJECT, "instance-state-name": [ "pending", "running", "stopping", "stopped", "shutting-down" ]})) for instance in instances: eip_addresses=vpc_con.get_all_addresses(filters=({"instance_id": instance.id})) for eip_address in eip_addresses: print(eip_address) eip_address.disassociate() eip_address.release() print(instance) instance.terminate() while len(vpc_con.get_only_instances(filters=({"tag:Project": PROJECT, "instance-state-name": [ "pending", "running", "stopping", "stopped", "shutting-down" ]}))) != 0: time.sleep(5) print ("Waiting for instances to stop") print("Deleting all subnets") subnets = vpc_con.get_all_subnets(filters=({"tag:Project": PROJECT})) for subnet in subnets: print(subnet) vpc_con.delete_subnet(subnet.id)
# NAT-instance bootstrap: disables this instance's source/destination
# check and points the default route of a fixed route table at it.
# Python 2 (urllib2). Intended to run ON the instance itself.
__version__ = "1.0"
__author__ = "Paolo Latella"
__email__ = "*****@*****.**"

from boto import ec2
from boto import vpc
import urllib2

REGION = 'eu-west-1'
# Hard-coded route table id and default-route destination.
RT = 'rtb-aa118bcf'
DESTINATION = '0.0.0.0/0'

# get instance metadata (the instance's own id, via the metadata service)
my_instance_id = urllib2.urlopen('http://169.254.169.254/latest/meta-data/instance-id').read()
# disable source-destination check (required for NAT/routing instances)
connection_to_ec2 = ec2.connect_to_region(REGION)
connection_to_ec2.modify_instance_attribute(my_instance_id, attribute='sourceDestCheck', value=False)
# point the route table's default route at this instance
connection_to_vpc = vpc.connect_to_region(REGION)
connection_to_vpc.replace_route(RT, DESTINATION, instance_id=my_instance_id)
def get_info(self):
    """Populate this object from AWS: VPC basics, subnets, security
    groups, instances and RDS databases belonging to self.vpc_id.

    :raises VpcIdError: if self.vpc_id is not found in the account
    """
    ## boto initial connection required before connecting to a region ##
    boto.connect_vpc()
    awsRegionConn = BotoVPC.connect_to_region(self.region)
    for _vid in awsRegionConn.get_all_vpcs():
        if _vid.id == self.vpc_id:
            self._valid = True
            self.cidr_block = _vid.cidr_block
            self.region = _vid.region.name
            # Collect the VPC's subnets keyed by subnet id.
            for _sub in awsRegionConn.get_all_subnets(filters=[('vpcId', _vid.id)]):
                self.subnets[_sub.id] = {
                    'cidr': _sub.cidr_block,
                    'zone': _sub.availability_zone
                }
            # Optional Name/Description tags on the VPC itself.
            if 'Name' in _vid.tags.keys():
                self.name = _vid.tags['Name']
            if 'Description' in _vid.tags.keys():
                self.description = _vid.tags['Description']
    # NOTE(review): loop nesting reconstructed from flattened source; the
    # loops below filter on self.vpc_id directly, so they are assumed to
    # sit at function level -- confirm against the original file.
    for _sg in awsRegionConn.get_all_security_groups(
            filters={'vpc_id': self.vpc_id}):
        self.security_groups[_sg.id] = {
            'name': _sg.name,
            'description': _sg.description
        }
    for _resv in awsRegionConn.get_all_instances(
            filters={'vpc_id': self.vpc_id}):
        # get_all_instances returns reservations; unwrap the instance.
        _inst = _resv.instances[0]
        self.instances[_inst.id] = {
            'private_ip': _inst.private_ip_address,
            'public_ip': _inst.ip_address,
            'state': _inst.state,
            'subnet_id': _inst.subnet_id,
            'type': _inst.instance_type,
            'avail_zone': _inst.placement,
        }
        if 'Name' in _inst.tags.keys():
            self.instances[_inst.id]['Name'] = _inst.tags['Name']
    # get RDS info - Call is required before switching regions#
    boto.connect_rds()
    rdsRegionConn = BotoRDS.connect_to_region(self.region)
    for _rds in rdsRegionConn.get_all_dbinstances():
        try:
            if _rds.VpcId == self.vpc_id:
                self.rds[_rds.id] = {
                    'zone': _rds.availability_zone,
                    'class': _rds.instance_class,
                    'size': _rds.allocated_storage,
                }
                # DBName is absent for some engines; default to ''.
                try:
                    self.rds[_rds.id]['db_name'] = _rds.DBName
                except AttributeError as e:
                    self.rds[_rds.id]['db_name'] = ''
        except AttributeError as e:
            # VpcId attribute is not available if not in a VPC #
            pass
    if not self._valid:
        raise VpcIdError("VpcId: %s Not found in AWS account!" % (self.vpc_id))
instance_type=INS_TYPE, key_name=INS_KEY_NAME, security_group_ids=INS_SECGROUPS, user_data=USER_DATA_SERVERNAME, subnet_id=INS_SUBNET.id) time.sleep(3) instance = reservation.instances[0] instance.add_tag("Project", INS_PROJECT) instance.add_tag("Name", SERVER_NAME) created_instances.append(instance) return created_instances #CREATING VPC print("Connecting to AWS") vpc_con = vpc.connect_to_region("ap-southeast-1") print("Creating VPC") my_vpc = vpc_con.create_vpc('10.0.0.0/16') vpc_con.modify_vpc_attribute(my_vpc.id, enable_dns_support=True) vpc_con.modify_vpc_attribute(my_vpc.id, enable_dns_hostnames=True) print("Tagging VPC") my_vpc.add_tag("Name", PROJECT + "-VPC") my_vpc.add_tag("Project", PROJECT) print("Creating subnets") subnetdmz = vpc_con.create_subnet(my_vpc.id, '10.0.1.0/24') subnetbe = vpc_con.create_subnet(my_vpc.id, '10.0.2.0/24') print("Tagging subnet") subnetdmz.add_tag("Name", PROJECT + "-Subnet") subnetdmz.add_tag("Project", PROJECT) subnetbe.add_tag("Name", PROJECT + "-Subnet") subnetbe.add_tag("Project", PROJECT)
regions = regioninfo.get_regions("ec2") region_list = [r for r in regions if (not "gov" in r.name and not "cn-north" in r.name)] return region_list def output(out_string): if args.output: print(out_string) if OUTPUT_FILE is not None: OUTPUT_FILE.write(out_string + '\n') OUTPUT_FILE.flush() region_data = [] for region in get_regions(): vpc_client = vpc.connect_to_region(region.name, aws_access_key_id=args.access_key, aws_secret_access_key=args.secret_key) vpc_list = vpc_client.get_all_vpcs() network_acls = vpc_client.get_all_network_acls() security_groups = vpc_client.get_all_security_groups() subnets = vpc_client.get_all_subnets() region_data.append( RegionData(region_name=region.name, vpcs=vpc_list, nacl_list=network_acls, sg_list=security_groups, subnet_list=subnets)) for data in region_data: default_vpc_id = None for vpc in data.vpc_list: if vpc.is_default is True: continue output("Region: {0}".format(data.region)) output("ID: {0}".format(vpc.id))
def real_main():
    # Entry point of the cluster-management CLI (Python 2 source: print
    # statements, raw_input).  Parses the command line, opens an EC2 or VPC
    # connection, then dispatches on `action`:
    #   launch | destroy | login | get-master | stop | start
    # Exits the process with status 1 on connection failure, on a launch with
    # no slaves, or on an unknown action.
    (opts, action, cluster_name) = parse_args()
    try:
        # Plain EC2 connection unless the user asked for a specific VPC.
        if opts.vpc_id is None:
            conn = ec2.connect_to_region(opts.region)
        else:
            # NOTE(review): "conncetion" typo in this user-visible message.
            print ("Debug: Making VPC conncetion")
            conn = vpc.connect_to_region(opts.region)
    except Exception as e:
        # NOTE(review): bare `stderr` here vs `sys.stderr` below — this only
        # works if the file does `from sys import stderr`; unify the style.
        print >> stderr, (e)
        sys.exit(1)
    # Select an AZ at random if it was not specified.
    if opts.zone == "":
        opts.zone = random.choice(conn.get_all_zones()).name
    if action == "launch":
        if opts.slaves <= 0:
            print >> sys.stderr, "ERROR: You have to start at least 1 slave"
            sys.exit(1)
        # --resume reattaches to an existing cluster instead of launching.
        if opts.resume:
            (master_nodes, slave_nodes) = get_existing_cluster(
                conn, opts, cluster_name)
        else:
            (master_nodes, slave_nodes) = launch_cluster(
                conn, opts, cluster_name)
        wait_for_cluster(conn, opts.wait, master_nodes, slave_nodes)
        setup_cluster(conn, master_nodes, slave_nodes, opts, True)
    elif action == "destroy":
        # Interactive confirmation before terminating every node.
        print "Are you sure you want to destroy the cluster %s?" % cluster_name
        print "The following instances will be terminated:"
        (master_nodes, slave_nodes) = get_existing_cluster(
            conn, opts, cluster_name, die_on_error=False)
        for inst in master_nodes + slave_nodes:
            print "> %s" % inst.public_dns_name
        msg = "ALL DATA ON ALL NODES WILL BE LOST!!\nDestroy cluster %s (y/N): " % cluster_name
        response = raw_input(msg)
        if response == "y":
            print "Terminating master..."
            for inst in master_nodes:
                inst.terminate()
            print "Terminating slaves..."
            for inst in slave_nodes:
                inst.terminate()
            # Delete security groups as well
            if opts.delete_groups:
                print "Deleting security groups (this will take some time)..."
                group_names = [cluster_name + "-master", cluster_name + "-slaves"]
                attempt = 1
                # Up to 3 passes: group deletion can fail transiently while
                # AWS propagates the rule revocations below.
                while attempt <= 3:
                    print "Attempt %d" % attempt
                    groups = [g for g in conn.get_all_security_groups()
                              if g.name in group_names]
                    success = True
                    # Delete individual rules in all groups before deleting groups to
                    # remove dependencies between them
                    for group in groups:
                        print "Deleting rules in security group " + group.name
                        for rule in group.rules:
                            for grant in rule.grants:
                                success &= group.revoke(
                                    ip_protocol=rule.ip_protocol,
                                    from_port=rule.from_port,
                                    to_port=rule.to_port,
                                    src_group=grant,
                                )
                    # Sleep for AWS eventual-consistency to catch up, and for instances
                    # to terminate
                    time.sleep(30)  # Yes, it does have to be this long :-(
                    for group in groups:
                        try:
                            conn.delete_security_group(group.name)
                            print "Deleted security group " + group.name
                        except boto.exception.EC2ResponseError:
                            success = False
                            print "Failed to delete security group " + group.name
                    # Unfortunately, group.revoke() returns True even if a rule was not
                    # deleted, so this needs to be rerun if something fails
                    if success:
                        break
                    attempt += 1
                # NOTE(review): `success` is only bound inside the while loop,
                # which always runs at least once here, so this is safe — but
                # fragile if the loop condition ever changes.
                if not success:
                    print "Failed to delete all security groups after 3 tries."
                    print "Try re-running in a few minutes."
    elif action == "login":
        # SSH into the master node, optionally opening a SOCKS proxy (-D).
        (master_nodes, slave_nodes) = get_existing_cluster(
            conn, opts, cluster_name)
        master = master_nodes[0].public_dns_name
        print "Logging into master " + master + "..."
        proxy_opt = []
        if opts.proxy_port is not None:
            proxy_opt = ["-D", opts.proxy_port]
        # -t -t forces a TTY even though stdin may not be a terminal.
        subprocess.check_call(
            ssh_command(opts) + proxy_opt
            + ["-t", "-t", "%s@%s" % (opts.user, master)])
    elif action == "get-master":
        (master_nodes, slave_nodes) = get_existing_cluster(
            conn, opts, cluster_name)
        print master_nodes[0].public_dns_name
    elif action == "stop":
        # Interactive confirmation: stopping loses ephemeral-disk data and
        # terminates (rather than stops) spot-instance slaves.
        response = raw_input(
            "Are you sure you want to stop the cluster "
            + cluster_name + "?\nDATA ON EPHEMERAL DISKS WILL BE LOST, "
            + "BUT THE CLUSTER WILL KEEP USING SPACE ON\n"
            + "AMAZON EBS IF IT IS EBS-BACKED!!\n"
            + "All data on spot-instance slaves will be lost.\n"
            + "Stop cluster " + cluster_name + " (y/N): "
        )
        if response == "y":
            (master_nodes, slave_nodes) = get_existing_cluster(
                conn, opts, cluster_name, die_on_error=False)
            print "Stopping master..."
            for inst in master_nodes:
                if inst.state not in ["shutting-down", "terminated"]:
                    inst.stop()
            print "Stopping slaves..."
            for inst in slave_nodes:
                if inst.state not in ["shutting-down", "terminated"]:
                    # Spot instances cannot be stopped, only terminated.
                    if inst.spot_instance_request_id:
                        inst.terminate()
                    else:
                        inst.stop()
    elif action == "start":
        # Restart a stopped cluster: slaves first, then the master, then wait
        # for everything to come up and re-run setup.
        (master_nodes, slave_nodes) = get_existing_cluster(
            conn, opts, cluster_name)
        print "Starting slaves..."
        for inst in slave_nodes:
            if inst.state not in ["shutting-down", "terminated"]:
                inst.start()
        print "Starting master..."
        for inst in master_nodes:
            if inst.state not in ["shutting-down", "terminated"]:
                inst.start()
        wait_for_cluster(conn, opts.wait, master_nodes, slave_nodes)
        setup_cluster(conn, master_nodes, slave_nodes, opts, False)
    else:
        print >> stderr, "Invalid action: %s" % action
        sys.exit(1)
#!/usr/bin/python3.4 from boto import vpc import time REGION = 'ap-southeast-1' PROJECT = 'Hadoop1' vpc_con = vpc.connect_to_region(REGION) print("Terminating all instances and their elastic ips") instances = vpc_con.get_only_instances(filters=({ "tag:Project": PROJECT, "instance-state-name": ["pending", "running", "stopping", "stopped", "shutting-down"] })) for instance in instances: eip_addresses = vpc_con.get_all_addresses( filters=({ "instance_id": instance.id })) for eip_address in eip_addresses: print(eip_address) eip_address.disassociate() eip_address.release() print(instance) instance.terminate() while len( vpc_con.get_only_instances(filters=({