def list(self):
    """
    List all paths and return them as a list of Path objects.
    """
    ec2 = self.driver.client("ec2")

    # Build a map from security group id to the service it is attached to, so
    # rules that reference a group can be resolved back to a service.
    sg_to_service = {}
    for service in self.service.list():
        sg_id = self.asg.get_launch_configuration_security_group(
            service.network.name, service.name)
        if sg_id in sg_to_service:
            raise BadEnvironmentStateException(
                "Service %s and %s have same security group: %s" %
                (sg_to_service[sg_id], service, sg_id))
        sg_to_service[sg_id] = service
    security_groups = ec2.describe_security_groups()

    def make_path(destination, source, rule):
        return Path(destination.network, source, destination,
                    rule["IpProtocol"], rule.get("FromPort", "N/A"))

    def get_cidr_paths(destination, ip_permissions):
        subnets = []
        for ip_range in ip_permissions["IpRanges"]:
            subnets.append(
                Subnetwork(subnetwork_id=None, name=None,
                           cidr_block=ip_range["CidrIp"], region=None,
                           availability_zone=None, instances=[]))
        # We treat an explicit CIDR block as a special case of a service with
        # no name.
        paths = []
        if subnets:
            source = Service(network=None, name=None, subnetworks=subnets)
            paths.append(make_path(destination, source, ip_permissions))
        return paths

    def get_sg_paths(destination, ip_permissions):
        paths = []
        for group in ip_permissions["UserIdGroupPairs"]:
            service = sg_to_service[group["GroupId"]]
            paths.append(make_path(destination, service, ip_permissions))
        return paths

    paths = []
    for security_group in security_groups["SecurityGroups"]:
        if security_group["GroupId"] not in sg_to_service:
            logger.debug(
                "Security group %s is apparently not attached to a service. Skipping",
                security_group["GroupId"])
            continue
        service = sg_to_service[security_group["GroupId"]]
        for ip_permissions in security_group["IpPermissions"]:
            logger.debug("ip_permissions: %s", ip_permissions)
            paths.extend(get_cidr_paths(service, ip_permissions))
            paths.extend(get_sg_paths(service, ip_permissions))
    return paths
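# Illustrative usage sketch (hypothetical): assumes `paths_client` is an
# instance of the class that defines list() above.
def _example_dump_paths(paths_client):
    # Log every discovered path so the resulting firewall graph can be inspected.
    for path in paths_client.list():
        logger.info("Allowed path: %s", path)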
def create(self, name, blueprint):
    """
    Create new network named "name" with blueprint file at "blueprint".
    """
    ec2 = self.driver.client("ec2")
    if self.get(name):
        raise DisallowedOperationException("Found existing VPC named: %s" % name)
    if blueprint:
        network_blueprint = NetworkBlueprint(blueprint)
    else:
        network_blueprint = NetworkBlueprint(None, "")
    allocation_blocks = network_blueprint.get_allowed_private_cidr()

    def get_cidr(prefix, address_range_includes, address_range_excludes):
        for address_range_include in address_range_includes:
            for cidr in generate_subnets(address_range_include,
                                         address_range_excludes, prefix, count=1):
                return str(cidr)
        raise NotEnoughIPSpaceException(
            "Could not allocate network of size "
            "%s in %s, excluding %s" %
            (prefix, address_range_includes, address_range_excludes))

    vpc = ec2.create_vpc(CidrBlock=get_cidr(network_blueprint.get_prefix(),
                                            [allocation_blocks], []))
    vpc_id = vpc["Vpc"]["VpcId"]
    try:
        # Tagging can fail until the VPC is visible, so retry until the VPC
        # shows up under its name or we run out of retries.
        creation_retries = 0
        while creation_retries < RETRY_COUNT:
            try:
                ec2.create_tags(Resources=[vpc_id],
                                Tags=[{"Key": "Name", "Value": name}])
                if not self.get(name):
                    time.sleep(float(RETRY_DELAY))
                else:
                    break
            except ec2.exceptions.ClientError as client_error:
                logger.debug("Received exception tagging VPC: %s", client_error)
                time.sleep(float(RETRY_DELAY))
            creation_retries = creation_retries + 1
            if creation_retries >= RETRY_COUNT:
                raise OperationTimedOut("Cannot find created VPC: %s" % vpc_id)
    except OperationTimedOut as exception:
        # Clean up the untagged VPC rather than leaking it.
        ec2.delete_vpc(VpcId=vpc_id)
        raise exception
    return canonicalize_network_info(name, vpc["Vpc"],
                                     self.driver.session.Session().region_name)
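# Illustrative usage sketch (hypothetical): assumes `network_client` is an
# instance of the class that defines create() above, and that
# "network-blueprint.yml" is a blueprint file on disk; both names are
# illustrative, not confirmed API.
def _example_create_network(network_client):
    # Create a VPC named "dev", sized by the blueprint's prefix, and return its
    # canonicalized description.
    return network_client.create("dev", "network-blueprint.yml")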
def destroy(self, service):
    """
    Destroy a group of instances described by "service".
    """
    logger.debug("Attempting to destroy: %s", service)
    asg_name = AsgName(network=service.network.name, subnetwork=service.name)

    self.asg.destroy_auto_scaling_group_instances(asg_name)

    # Wait for instances to be gone.  Need to do this before we can delete
    # the actual ASG otherwise it will error.
    def instance_list(service, state):
        return [instance for subnetwork in service.subnetworks
                for instance in subnetwork.instances if instance.state == state]

    asg = self.get(service.network, service.name)
    retries = 0
    while asg and instance_list(asg, "terminated"):
        logger.info("Waiting for instance termination in asg: %s", asg)
        asg = self.get(service.network, service.name)
        retries = retries + 1
        if retries > 60:
            raise OperationTimedOut("Timed out waiting for ASG scale down")
        time.sleep(float(10))

    self.asg.destroy_auto_scaling_group(asg_name)

    # Wait for ASG to be gone.  Need to wait for this because it's a
    # dependency of the launch configuration.
    asg = self.get(service.network, service.name)
    retries = 0
    while asg:
        logger.info("Waiting for asg deletion: %s", asg)
        asg = self.get(service.network, service.name)
        retries = retries + 1
        if retries > 60:
            raise OperationTimedOut("Timed out waiting for ASG deletion")
        time.sleep(float(10))

    vpc_id = service.network.network_id
    lc_security_group = self.asg.get_launch_configuration_security_group(
        service.network, service.name)
    self.asg.destroy_launch_configuration(asg_name)
    if lc_security_group:
        self.security_groups.delete_referencing_rules(vpc_id, lc_security_group)
        self.security_groups.delete_with_retries(lc_security_group,
                                                 RETRY_COUNT, RETRY_DELAY)
    else:
        self.security_groups.delete_by_name(vpc_id, str(asg_name),
                                            RETRY_COUNT, RETRY_DELAY)
    self.subnetwork.destroy(service.network, service.name)
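# Illustrative usage sketch (hypothetical): assumes `service_client` is an
# instance of the class that defines destroy() and get() above, and that
# `network` is a network object returned by the network client.
def _example_destroy_service(service_client, network, service_name):
    # destroy() expects a full service object, so look it up first.
    service = service_client.get(network, service_name)
    if service:
        service_client.destroy(service)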
def has_access(self, source, destination, port):
    """
    Return True if there is a path from "source" to "destination" on "port".
    """
    ec2 = self.driver.client("ec2")
    dest_sg_id, src_sg_id, _, src_ip_permissions = self._extract_service_info(
        source, destination, port)
    security_group = ec2.describe_security_groups(GroupIds=[dest_sg_id])

    def extract_cidr_port(ip_permissions):
        cidr_port_list = []
        for ip_permission in ip_permissions:
            if "IpRanges" in ip_permission:
                for ip_range in ip_permission["IpRanges"]:
                    cidr_port_list.append({"port": ip_permission["FromPort"],
                                           "cidr": ip_range["CidrIp"]})
        return cidr_port_list

    def sg_allowed(ip_permissions, sg_id, port):
        for ip_permission in ip_permissions:
            if (ip_permission["UserIdGroupPairs"] and
                    ip_permission["FromPort"] == port):
                for pair in ip_permission["UserIdGroupPairs"]:
                    if "GroupId" in pair and pair["GroupId"] == sg_id:
                        return True
        return False

    ip_permissions = security_group["SecurityGroups"][0]["IpPermissions"]
    logger.debug("ip_permissions: %s", ip_permissions)

    # First check for a rule that allows the source's security group directly.
    if src_sg_id and sg_allowed(ip_permissions, src_sg_id, port):
        return True

    # Otherwise, check whether any CIDR rule on the destination overlaps a CIDR
    # the source would use, on the requested port.
    src_cidr_port_info = extract_cidr_port(src_ip_permissions)
    cidr_port_info = extract_cidr_port(ip_permissions)
    for cidr_port in cidr_port_info:
        if cidr_port["port"] != port:
            continue
        for src_cidr_port in src_cidr_port_info:
            if (ipaddress.IPv4Network(src_cidr_port["cidr"]).overlaps(
                    ipaddress.IPv4Network(cidr_port["cidr"]))):
                return True
    return False
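# Illustrative usage sketch (hypothetical): assumes `paths_client` is an
# instance of the class that defines has_access() above, and that `web` and
# `database` are service objects returned by the service client.
def _example_check_access(paths_client, web, database):
    # True only if an ingress rule on the database's security group admits the
    # web service, either by security group or by an overlapping CIDR block.
    return paths_client.has_access(web, database, 5432)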
def add(self, source, destination, port):
    """
    Add a route from "source" to "destination" on "port".
    """
    logger.debug("Adding path from %s to %s", source, destination)
    if self.has_access(source, destination, port):
        logger.info("Service %s already has access to %s on port: %s",
                    source, destination, port)
        return True

    # Controlling egress in AWS is not currently supported; all egress is
    # always allowed.
    if not isinstance(destination, Service):
        raise DisallowedOperationException(
            "Destination must be a butter.types.networking.Service object")

    dest_sg_id, _, _, src_ip_permissions = self._extract_service_info(
        source, destination, port)
    ec2 = self.driver.client("ec2")
    ec2.authorize_security_group_ingress(GroupId=dest_sg_id,
                                         IpPermissions=src_ip_permissions)
    return Path(destination.network, source, destination, "tcp", port)
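# Illustrative usage sketch (hypothetical): assumes `paths_client` is an
# instance of the class that defines add() above, and that `web` and `database`
# are service objects returned by the service client.
def _example_allow_access(paths_client, web, database):
    # Open port 5432 from the web service to the database service; add() is a
    # no-op (returning True) when the access already exists.
    return paths_client.add(web, database, 5432)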
def get(self, network, service_name):
    """
    Discover a service in "network" named "service_name".
    """
    logger.info("Discovering autoscaling group named %s in network: %s",
                service_name, network)

    def discover_asg(network_name, service_name):
        autoscaling = self.driver.client("autoscaling")
        logger.debug("Discovering auto scaling groups with name: %s", service_name)
        asg_name = AsgName(network=network_name, subnetwork=service_name)
        asgs = autoscaling.describe_auto_scaling_groups(
            AutoScalingGroupNames=[str(asg_name)])
        logger.debug("Found asgs: %s", asgs)
        if len(asgs["AutoScalingGroups"]) > 1:
            raise BadEnvironmentStateException(
                "Expected to find at most one auto scaling group "
                "named: %s, output: %s" % (str(asg_name), asgs))
        if not asgs["AutoScalingGroups"]:
            return None
        return asgs["AutoScalingGroups"][0]

    def discover_instances(instance_ids):
        ec2 = self.driver.client("ec2")
        logger.debug("Discovering instances: %s", instance_ids)
        instances = {"Reservations": []}
        # Need this check because if we pass an empty list the API returns all instances
        if instance_ids:
            instances = ec2.describe_instances(InstanceIds=instance_ids)
        return [instance for reservation in instances["Reservations"]
                for instance in reservation["Instances"]]

    # 1. Get List Of Instances
    discovery_retries = 0
    discovery_complete = False
    while discovery_retries < RETRY_COUNT:
        try:
            asg = discover_asg(network.name, service_name)
            if not asg:
                return None
            instance_ids = [instance["InstanceId"] for instance in asg["Instances"]]
            instances = discover_instances(instance_ids)
            logger.debug("Discovered instances: %s", instances)
            discovery_complete = True
        except ClientError as client_error:
            # There is a race between when we discover the autoscaling group
            # itself and when we try to search for the instances inside it,
            # so just retry if this happens.
            logger.info("Received exception discovering instance: %s", client_error)
            if client_error.response["Error"]["Code"] == "InvalidInstanceID.NotFound":
                pass
            else:
                raise

        if discovery_complete:
            break

        discovery_retries = discovery_retries + 1
        logger.info("Instance discovery retry number: %s", discovery_retries)
        if discovery_retries >= RETRY_COUNT:
            raise OperationTimedOut(
                "Exceeded retries while discovering %s, in network %s" %
                (service_name, network))
        time.sleep(RETRY_DELAY)

    # 2. Get List Of Subnets
    subnetworks = self.subnetwork.get(network, service_name)

    # NOTE: In moto instance objects do not include a "SubnetId" and the IP
    # addresses are assigned randomly in the VPC, so for now just stripe
    # instances across subnets.
    if self.mock:
        for instance, subnetwork in zip(instances, itertools.cycle(subnetworks)):
            subnetwork.instances.append(canonicalize_instance_info(instance))
        return Service(network=network, name=service_name, subnetworks=subnetworks)

    # 3. Group Instances By Subnet
    for subnetwork in subnetworks:
        for instance in instances:
            if "SubnetId" in instance and subnetwork.subnetwork_id == instance["SubnetId"]:
                subnetwork.instances.append(canonicalize_instance_info(instance))

    # 4. Profit!
    return Service(network=network, name=service_name, subnetworks=subnetworks)
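# Illustrative usage sketch (hypothetical): assumes `service_client` is an
# instance of the class that defines get() above, and that `network` was
# returned by the network client.
def _example_count_instances(service_client, network, service_name):
    # get() returns None when no matching auto scaling group exists.
    service = service_client.get(network, service_name)
    if service is None:
        return 0
    return sum(len(subnetwork.instances) for subnetwork in service.subnetworks)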