def check(self):
    """
    Check: Runs the check script.

    Loads the saved deploy state, resolves the network, service, and the
    single running instance created by deploy, then runs the configured
    check script with the SSH username, public IP, and private key path.

    Raises DisallowedOperationException if no state exists, the network or
    service cannot be found, or the service does not have exactly one
    running instance.  Raises subprocess.CalledProcessError if the check
    script exits non-zero (check=True).
    """
    # 1. Load existing configuration.  Load once and reuse the result; the
    # previous code called _load_state() a second time just for the
    # emptiness check, reading the state file twice.
    state = self._load_state()
    if not state:
        raise DisallowedOperationException(
            "Called check but found no state at %s. Call deploy first." %
            (self.state_file))
    logger.debug("Loaded state: %s", state)
    network = self.client.network.get(state["network"])
    if not network:
        raise DisallowedOperationException("Network %s not found!" %
                                           state["network"])
    service = self.client.service.get(network, state["service"])
    if not service:
        raise DisallowedOperationException("Service %s not found!" %
                                           state["service"])
    instances = self.client.service.get_instances(service)
    if not instances:
        raise DisallowedOperationException(
            "No running instances in service %s!" % (state["service"]))
    if len(instances) > 1:
        raise DisallowedOperationException(
            "More than one running instance in service %s!" %
            (state["service"]))
    public_ip = instances[0].public_ip
    # 2. Run ./check <ssh_user> <ssh_key_path>
    return subprocess.run([
        self.config.get_check_script_path(), state["ssh_username"],
        public_ip, self.private_key_path
    ], check=True)
def _get_network(self, network):
    """Resolve *network* to exactly one registered Network resource."""
    if "Network" not in self.model.resources():
        raise DisallowedOperationException(
            "Network model must be registered to use Subnet model.")
    matches = self.model.get("Network", network)
    if len(matches) == 1:
        return matches[0]
    raise DisallowedOperationException(
        "Matcher must match exactly one network, %s matched %s" %
        (network, matches))
def get_image(image_specifier):
    """Find exactly one image whose name matches the *image_specifier* regex."""
    matching = [
        candidate for candidate in self.driver.list_images()
        if re.match(image_specifier, candidate.name)
    ]
    if len(matching) == 1:
        return matching[0]
    if not matching:
        raise DisallowedOperationException(
            "Could not find image named %s" % image_specifier)
    raise DisallowedOperationException(
        "Found multiple images for specifier %s: %s" %
        (image_specifier, matching))
def get_blueprint_tester(client, base_dir, fixture_type, fixture_options):
    """
    Import the test boilerplate from the blueprint directory.

    Loads the fixture module named by fixture_options["module_name"] from
    base_dir and returns its BlueprintTest instantiated with *client*.
    """
    if fixture_type != "python-blueprint-fixture":
        raise DisallowedOperationException(
            "Invalid fixture type: %s. Only python-blueprint-fixture is currently supported"
            % (fixture_type))
    if "module_name" not in fixture_options:
        raise DisallowedOperationException(
            "\"module_name\" not set in \"fixture_options\" which is required for the "
            "\"python-blueprint-fixture\" type.")
    # Make the blueprint directory importable, then load the fixture module.
    sys.path.append(base_dir)
    module = importlib.import_module(fixture_options["module_name"])
    return module.BlueprintTest(client)
def create(self, name, service):
    """
    Create new image named "name" from "service".

    Stops the single instance backing *service*, creates an image from its
    one attached volume, and returns the resulting Image.

    Raises BadEnvironmentStateException if the service's instance cannot be
    found, and DisallowedOperationException if the instance does not have
    exactly one disk.
    """
    instances = [
        instance for subnetwork in service.subnetworks
        for instance in subnetwork.instances
    ]
    # Guard against an empty service; previously this raised a bare
    # IndexError on instances[0] below.
    if not instances:
        raise BadEnvironmentStateException(
            "Could not find instance in service %s" % service)
    # Fix: "node" was left unbound (NameError at the check below) when no
    # GCE node matched.  Initialize it and stop scanning on the first match.
    node = None
    for gce_node in self.driver.list_nodes():
        if gce_node.uuid == instances[0].instance_id:
            node = gce_node
            break
    if not node:
        raise BadEnvironmentStateException(
            "Could not find instance in service %s" % service)
    if len(node.extra["disks"]) != 1:
        raise DisallowedOperationException(
            "Only support exactly one volume for image create: %s" %
            node.extra["disks"])
    # Stop the node first so the disk is in a consistent state.
    logger.info("Stopping node: %s", node.name)
    self.driver.ex_stop_node(node)
    logger.info("Creating image from service: %s", service.name)
    volume = self.driver.ex_get_volume(
        node.extra["disks"][0]["deviceName"])
    image = self.driver.ex_create_image(name, volume)
    logger.info("Created image: %s", image.id)
    return Image(name=image.name, image_id=image.id,
                 created_at=str(image.extra["creationTimestamp"]))
def create(self, network, service_name, blueprint, template_vars=None, count=None):
    """
    Create a service in "network" named "service_name" with blueprint file at
    "blueprint".

    "template_vars" are passed to the initialization scripts as jinja2
    variables.

    Example:

        example_network = client.network.create("example")
        example_service = client.service.create(network=example_network,
                                                name="example_service",
                                                blueprint="example-blueprint.yml")
    """
    logger.debug(
        'Creating service %s in network %s with blueprint %s, template_vars %s, '
        'and count %s', service_name, network, blueprint, template_vars, count)
    if isinstance(network, Network):
        return self.service.create(network, service_name, blueprint,
                                   template_vars, count)
    raise DisallowedOperationException(
        "Network argument to create must be of type cloudless.types.common.Network"
    )
def apply(self, resource_definition):
    """Apply and return the single subnet matched by *resource_definition*."""
    matched = self.get(resource_definition)
    if len(matched) != 1:
        raise DisallowedOperationException(
            "Cannot apply, matched more than one subnet!: %s" % matched)
    subnet = matched[0]
    logger.info("Applying subnet: %s", subnet)
    return subnet
def get(self, resource_definition):
    """
    Return the firewalls (security groups) matching *resource_definition*.

    The definition must carry a network selector; the VPC is resolved and
    security groups are filtered by VPC id and group name.

    Raises DisallowedOperationException when no network selector is set,
    and BadEnvironmentStateException if multiple groups share one name in
    the same VPC.
    """
    ec2 = self.driver.client("ec2")
    firewall = resource_definition
    logger.info("Getting firewall: %s", firewall)
    # Fix: validate the network selector *before* dereferencing it.  The
    # previous code called self.network.get(firewall.network.name) first,
    # which raised AttributeError (not DisallowedOperationException) when
    # no network was set, and then fetched the same network a second time.
    if not firewall.network or not firewall.network.name:
        raise DisallowedOperationException(
            "Network selector required when getting firewall")
    network = self.network.get(firewall.network.name)
    search_filters = []
    search_filters.append({
        'Name': 'vpc-id',
        'Values': [network.network_id]
    })
    search_filters.append({
        'Name': 'group-name',
        'Values': [firewall.name]
    })
    security_groups = ec2.describe_security_groups(Filters=search_filters)
    if len(security_groups["SecurityGroups"]) > 1:
        raise BadEnvironmentStateException(
            "Found multiple security groups with name %s, in vpc %s: %s" %
            (firewall.name, network.network_id, security_groups))
    return [
        Firewall(version=firewall.version, name=sg["GroupName"],
                 network=firewall.network, id=sg["GroupId"])
        for sg in security_groups["SecurityGroups"]
    ]
def apply(self, resource_definition):
    """Apply and return the single firewall matched by *resource_definition*."""
    matched = self.get(resource_definition)
    if len(matched) != 1:
        raise DisallowedOperationException(
            "Cannot apply, matched more than one firewall!: %s" % matched)
    firewall = matched[0]
    logger.info("Applying firewall: %s", firewall)
    return firewall
def create(self, name, blueprint):
    """
    Create new network named "name" with blueprint file at "blueprint".

    Allocates a CIDR block from the blueprint's allowed private range,
    creates the VPC, tags it with the given name (retrying until the tag
    is visible), and returns canonicalized network info.  On a tagging
    timeout the VPC is deleted before the exception propagates.
    """
    ec2 = self.driver.client("ec2")
    if self.get(name):
        raise DisallowedOperationException("Found existing VPC named: %s" %
                                           name)
    if blueprint:
        network_blueprint = NetworkBlueprint.from_file(blueprint)
    else:
        network_blueprint = NetworkBlueprint("")
    allocation_blocks = network_blueprint.get_allowed_private_cidr()

    def get_cidr(prefix, address_range_includes, address_range_excludes):
        # Take the first subnet of the requested prefix size that fits.
        for address_range_include in address_range_includes:
            for cidr in generate_subnets(address_range_include,
                                         address_range_excludes, prefix,
                                         count=1):
                return str(cidr)
        # Fix: the "excluding" clause previously interpolated
        # address_range_includes twice instead of the excludes.
        raise NotEnoughIPSpaceException(
            "Could not allocate network of size "
            "%s in %s, excluding %s" %
            (prefix, address_range_includes, address_range_excludes))

    vpc = ec2.create_vpc(CidrBlock=get_cidr(network_blueprint.get_prefix(),
                                            [allocation_blocks], []))
    vpc_id = vpc["Vpc"]["VpcId"]
    try:
        # Tagging can fail transiently right after VPC creation, so retry
        # until the named VPC is discoverable.
        creation_retries = 0
        while creation_retries < RETRY_COUNT:
            try:
                ec2.create_tags(Resources=[vpc_id],
                                Tags=[{
                                    "Key": "Name",
                                    "Value": name
                                }])
                if not self.get(name):
                    time.sleep(float(RETRY_DELAY))
                else:
                    break
            except ec2.exceptions.ClientError as client_error:
                logger.debug("Received exception tagging VPC: %s",
                             client_error)
                time.sleep(float(RETRY_DELAY))
            creation_retries = creation_retries + 1
            if creation_retries >= RETRY_COUNT:
                raise OperationTimedOut("Cannot find created VPC: %s" %
                                        vpc_id)
    except OperationTimedOut as exception:
        # Don't leak the VPC if we could never find it by name.
        ec2.delete_vpc(VpcId=vpc_id)
        raise exception
    return canonicalize_network_info(
        name, vpc["Vpc"], self.driver.session.Session().region_name)
def _validate_args(self, source, destination):
    """
    Validate source/destination path endpoints.

    Both must be Service or CidrBlock objects, at least one must be a
    Service, and two Services must live in the same network.
    """
    source_is_service = isinstance(source, Service)
    destination_is_service = isinstance(destination, Service)
    if (not source_is_service and not destination_is_service
            and not isinstance(source, CidrBlock)
            and not isinstance(destination, CidrBlock)):
        raise DisallowedOperationException(
            "Source and destination can only be a cloudless.types.networking.Service object or "
            "a cloudless.types.networking.CidrBlock object")
    if not destination_is_service and not source_is_service:
        raise DisallowedOperationException(
            "Either destination or source must be a cloudless.types.networking.Service object"
        )
    if (source_is_service and destination_is_service
            and source.network != destination.network):
        raise DisallowedOperationException(
            "Destination and source must be in the same network if specified as services"
        )
def get_instance(instance_id):
    """Return the single raw EC2 instance description for *instance_id*."""
    reservations = ec2.describe_instances(InstanceIds=[instance_id])
    flattened = [
        instance for reservation in reservations["Reservations"]
        for instance in reservation["Instances"]
    ]
    if len(flattened) == 1:
        return flattened[0]
    raise DisallowedOperationException(
        "Service must have exactly one instance, found %s" % flattened)
def destroy(self, image):
    """
    Destroy the given image.

    Example:

        client.image.destroy(client.image.get("myimage"))
    """
    logger.debug('Destroying image %s', image)
    if isinstance(image, Image):
        return self.image.destroy(image)
    raise DisallowedOperationException(
        "Argument to destroy must be of type cloudless.types.common.Image"
    )
def destroy(self, network):
    """
    Destroy the given network.

    Example:

        client.network.destroy(client.network.get("mynetwork"))
    """
    logger.debug('Destroying network %s', network)
    if isinstance(network, Network):
        return self.network.destroy(network)
    raise DisallowedOperationException(
        "Argument to destroy must be of type cloudless.types.common.Network"
    )
def remove(self, source, destination, port):
    """
    Remove a route from "source" to "destination" on the given port.

    Only ingress rules are revoked; controlling egress in AWS is currently
    not supported, so all egress is always allowed.
    """
    if not isinstance(destination, Service):
        raise DisallowedOperationException(
            "Destination must be a cloudless.types.networking.Service object"
        )
    ec2_client = self.driver.client("ec2")
    service_info = self._extract_service_info(source, destination, port)
    ec2_client.revoke_security_group_ingress(
        GroupId=service_info[0], IpPermissions=service_info[3])
def create(self, name, service):
    """
    Create new image named "name" from "service".

    Example:

        client.image.create("myimage", service)
    """
    logger.debug('Creating image %s from service %s', name, service)
    if isinstance(service, Service):
        return self.image.create(name, service)
    raise DisallowedOperationException(
        "Service argument to create must be of type cloudless.types.common.Service"
    )
def get_instances(self, service):
    """
    Helper to return the list of instances given a service object.

    Example:

        example_service = client.service.get(
            network=client.network.get("example"), name="example_service")
        instances = client.service.get_instances(example_service)
    """
    if not isinstance(service, Service):
        raise DisallowedOperationException(
            "Service argument to get_instances must be of type cloudless.types.common.Service"
        )
    instances = []
    for subnetwork in service.subnetworks:
        instances.extend(subnetwork.instances)
    return instances
def destroy(self, service):
    """
    Destroy a service described by the "service" object.

    Example:

        example_service = client.service.get(
            network=client.network.get("example"), name="example_service")
        client.service.destroy(example_service)
    """
    logger.debug('Destroying service %s', service)
    if isinstance(service, Service):
        return self.service.destroy(service)
    raise DisallowedOperationException(
        "Service argument to destroy must be of type cloudless.types.common.Service"
    )
def get(self, network, service_name):
    """
    Get a service in "network" named "service_name".

    Example:

        example_service = client.service.get(
            network=client.network.get("example"), name="example_service")
    """
    logger.debug('Discovering service %s in network %s', service_name,
                 network)
    if isinstance(network, Network):
        return self.service.get(network, service_name)
    raise DisallowedOperationException(
        "Network argument to get must be of type cloudless.types.common.Network"
    )
def node_types(self):
    """
    Get a list of node sizes to use for matching resource requirements to
    instance type.
    """
    # "list_sizes" doesn't seem to work with region strings, so find a zone
    # object inside the default region and query sizes against that.
    for zone in self.driver.ex_list_zones():
        if not zone.name.startswith(DEFAULT_REGION):
            continue
        return [
            canonicalize_node_size(size)
            for size in self.driver.list_sizes(location=zone)
        ]
    raise DisallowedOperationException(
        "Could not find zone in region: %s" % DEFAULT_REGION)
def deploy(self): """ Deploy: Deploys the image on the cloud provider. """ # 1. Deploy a temporary network state = {} state["network"] = generate_unique_name("image-build") state["service"] = "image-build" # Save state first in case something fails if self._load_state(): raise DisallowedOperationException( "Called deploy but found existing state %s in state file %s" % (state, self.state_file)) self._save_state(state) network = self.client.network.create(state["network"], NETWORK_BLUEPRINT) # 2. Create temporary ssh keys keypair = generate_ssh_keypair() self._save_keypair(keypair) state["ssh_username"] = "******" state["ssh_public_key"] = self.public_key_path state["ssh_private_key"] = self.private_key_path self._save_state(state) # 3. Deploy a service with one instance in that network template_vars = { "cloudless_image_build_ssh_key": keypair.public_key, "cloudless_image_build_ssh_username": state["ssh_username"] } service = self.client.service.create(network, state["service"], self.config.get_blueprint_path(), template_vars=template_vars, count=1) # 4. Allow port 22 to that instance internet = CidrBlock("0.0.0.0/0") self.client.paths.add(internet, service, 22) return service, state
def add(self, source, destination, port):
    """
    Adds a route from "source" to "destination" on the given port.

    Returns True when access already exists; otherwise authorizes ingress
    on the destination's security group and returns the resulting Path.
    Controlling egress in AWS is currently not supported, so all egress is
    always allowed.
    """
    logger.debug("Adding path from %s to %s", source, destination)
    if self.has_access(source, destination, port):
        logger.info("Service %s already has access to %s on port: %s",
                    source, destination, port)
        return True
    if not isinstance(destination, Service):
        raise DisallowedOperationException(
            "Destination must be a cloudless.types.networking.Service object"
        )
    service_info = self._extract_service_info(source, destination, port)
    destination_group_id = service_info[0]
    ingress_permissions = service_info[3]
    self.driver.client("ec2").authorize_security_group_ingress(
        GroupId=destination_group_id, IpPermissions=ingress_permissions)
    return Path(destination.network, source, destination, "tcp", port)
def get_blockdev_info():
    """
    Build a BlockDeviceMappings entry describing the instance's single
    EBS volume, suitable for passing to ec2.create_image.

    Relies on `instances`, `get_instance`, and `ec2` from the enclosing
    scope.  Raises DisallowedOperationException if the instance has more
    than one block device, and BadEnvironmentStateException if the volume
    lookup is ambiguous.
    """
    raw_instance = get_instance(instances[0].instance_id)
    logger.debug("Getting blockdev info from: %s", raw_instance)
    # Only single-disk instances are supported for image capture.
    if len(raw_instance["BlockDeviceMappings"]) != 1:
        raise DisallowedOperationException(
            "Currently only support saving instances with one blockdev, found %s"
            % (raw_instance))
    volume_id = raw_instance["BlockDeviceMappings"][0]["Ebs"]["VolumeId"]
    volumes = ec2.describe_volumes(VolumeIds=[volume_id])
    if len(volumes["Volumes"]) != 1:
        raise BadEnvironmentStateException(
            "Found two volumes with the same id: %s" % volumes)
    volume = volumes["Volumes"][0]
    # Mirror the source volume's properties onto the image's mapping.
    return {
        "DeviceName":
        raw_instance["BlockDeviceMappings"][0]["DeviceName"],
        "Ebs": {
            "Encrypted": volume["Encrypted"],
            "DeleteOnTermination": True,
            "VolumeSize": volume["Size"],
            "VolumeType": volume["VolumeType"]
        }
    }
def create(self, name, service):
    """
    Create new image named "name" from "service".

    The service must have exactly one instance.  The instance is detached
    from its autoscaling group (so AWS doesn't replace/terminate it),
    stopped, imaged from its single EBS volume, and finally terminated.

    Returns an Image object.  Raises DisallowedOperationException on
    invalid instance/blockdev counts, BadEnvironmentStateException on
    ambiguous AWS responses, and OperationTimedOut (via the retry
    decorators) if stop/image-availability never converge.
    """
    ec2 = self.driver.client("ec2")
    instances = [
        instance for subnetwork in service.subnetworks
        for instance in subnetwork.instances
    ]
    if len(instances) != 1:
        raise DisallowedOperationException(
            "Service must have exactly one instance, found %s" % instances)

    def get_instance(instance_id):
        # Flatten reservations -> instances and require exactly one match.
        reservations = ec2.describe_instances(InstanceIds=[instance_id])
        raw_instances = [
            instance for reservation in reservations["Reservations"]
            for instance in reservation["Instances"]
        ]
        if len(raw_instances) != 1:
            raise DisallowedOperationException(
                "Service must have exactly one instance, found %s" %
                raw_instances)
        return raw_instances[0]

    # First, stop instances to prevent the state from changing while we're
    # snapshotting.
    logger.info("Stopping instance: %s", instances[0].instance_id)
    autoscaling = self.driver.client("autoscaling")

    # Must detach from autoscaling group otherwise our instance will get
    # terminated. See https://stackoverflow.com/a/28883869.
    #
    # Also see https://github.com/getcloudless/cloudless/issues/20.
    def detach_from_asg(service, instance_id):
        asg_name = str(
            AsgName(network=service.network.name, subnetwork=service.name))
        # Drop MinSize to 0 so detaching doesn't trigger a replacement.
        autoscaling.update_auto_scaling_group(AutoScalingGroupName=asg_name,
                                              MinSize=0)
        self.asg.wait_for_in_service(asg_name, instance_id)
        autoscaling.detach_instances(InstanceIds=[instance_id],
                                     AutoScalingGroupName=asg_name,
                                     ShouldDecrementDesiredCapacity=True)

    detach_from_asg(service, instances[0].instance_id)

    def retry_if_timeout(exception):
        """ Checks if this exception is just because we haven't converged yet. """
        return isinstance(exception, OperationTimedOut)

    ec2.stop_instances(InstanceIds=[instances[0].instance_id])

    @retry(wait_fixed=RETRY_DELAY,
           stop_max_attempt_number=RETRY_COUNT,
           retry_on_exception=retry_if_timeout)
    def wait_for_stopped(instance_id):
        # Polls via the retry decorator until the instance reports stopped.
        raw_instance = get_instance(instance_id)
        logger.debug("Current state: %s", raw_instance)
        if raw_instance["State"]["Name"] != "stopped":
            raise OperationTimedOut(
                "Timed out waiting for instance: %s to stop" % instance_id)

    wait_for_stopped(instances[0].instance_id)

    # Get information about the instance's block device
    def get_blockdev_info():
        # Describe the single EBS volume and build the BlockDeviceMappings
        # entry that create_image expects.
        raw_instance = get_instance(instances[0].instance_id)
        logger.debug("Getting blockdev info from: %s", raw_instance)
        if len(raw_instance["BlockDeviceMappings"]) != 1:
            raise DisallowedOperationException(
                "Currently only support saving instances with one blockdev, found %s"
                % (raw_instance))
        volume_id = raw_instance["BlockDeviceMappings"][0]["Ebs"][
            "VolumeId"]
        volumes = ec2.describe_volumes(VolumeIds=[volume_id])
        if len(volumes["Volumes"]) != 1:
            raise BadEnvironmentStateException(
                "Found two volumes with the same id: %s" % volumes)
        volume = volumes["Volumes"][0]
        return {
            "DeviceName":
            raw_instance["BlockDeviceMappings"][0]["DeviceName"],
            "Ebs": {
                "Encrypted": volume["Encrypted"],
                "DeleteOnTermination": True,
                "VolumeSize": volume["Size"],
                "VolumeType": volume["VolumeType"]
            }
        }

    block_device = get_blockdev_info()

    # Save the image and return image data
    def get_image(image_id):
        images = ec2.describe_images(ImageIds=[image_id])
        if len(images["Images"]) != 1:
            raise BadEnvironmentStateException(
                "Expected exactly one image, found %s" % images)
        return images["Images"][0]

    @retry(wait_fixed=RETRY_DELAY,
           stop_max_attempt_number=RETRY_COUNT,
           retry_on_exception=retry_if_timeout)
    def wait_for_available(image_id):
        # Polls via the retry decorator until the image is usable.
        image = get_image(image_id)
        logger.debug("Current image state: %s", image)
        if image["State"] != "available":
            raise OperationTimedOut(
                "Timed out waiting for image %s to be available." % image_id)

    logger.info("Creating image from instance: %s",
                instances[0].instance_id)
    image_id = ec2.create_image(InstanceId=instances[0].instance_id,
                                Name=name,
                                BlockDeviceMappings=[block_device])
    wait_for_available(image_id["ImageId"])
    logger.info("Created image: %s", image_id["ImageId"])
    image = get_image(image_id["ImageId"])
    # Terminate the instance so it doesn't cause us to fail deleting our
    # service. This is unfortunately brittle and if something fails before
    # this point we'll be in this weird state where the security group will
    # have a dependency. That's not acceptable, but really it depends on
    # fixing: https://github.com/getcloudless/cloudless/issues/20 because
    # the ASG only reports the running instances and that's how the service
    # destroy discovers them.
    ec2.terminate_instances(InstanceIds=[instances[0].instance_id])
    return Image(image_id=image["ImageId"],
                 name=image["Name"],
                 created_at=image["CreationDate"])
def destroy(self, network):
    """
    Destroy a network given the provided network object.

    Refuses to delete a VPC that still contains subnets.  Cleans up the
    internet gateway and all non-default security groups before deleting
    the VPC itself.  Returns the delete_vpc response.
    """
    ec2 = self.driver.client("ec2")
    # Check to see if we have any subnets, otherwise bail out
    subnets = ec2.describe_subnets(Filters=[{
        'Name': 'vpc-id',
        'Values': [network.network_id]
    }])
    if subnets["Subnets"]:
        message = "Found subnets in network, cannot delete: %s" % subnets
        logger.error(message)
        raise DisallowedOperationException(message)
    # Delete internet gateway if it's no longer referenced
    igw = ec2.describe_internet_gateways(
        Filters=[{
            'Name': 'attachment.vpc-id',
            'Values': [network.network_id]
        }])
    igw_id = None
    if len(igw["InternetGateways"]) == 1:
        igw_id = igw["InternetGateways"][0]["InternetGatewayId"]
    elif len(igw["InternetGateways"]) > 1:
        # A VPC can have at most one attached IGW, so >1 is a bad response.
        raise Exception(
            "Invalid response from describe_internet_gateways: %s" % igw)
    if igw_id and not self.internet_gateways.route_count(
            network.network_id, igw_id):
        ec2.detach_internet_gateway(InternetGatewayId=igw_id,
                                    VpcId=network.network_id)
        ec2.delete_internet_gateway(InternetGatewayId=igw_id)
    # Since we check above that there are no subnets, and therefore nothing
    # deployed in this VPC, for now assume it is safe to delete.
    security_groups = ec2.describe_security_groups(
        Filters=[{
            'Name': 'vpc-id',
            'Values': [network.network_id]
        }])
    for security_group in security_groups["SecurityGroups"]:
        # The default group cannot be deleted and is removed with the VPC.
        if security_group["GroupName"] == "default":
            continue
        logger.info("Deleting security group: %s",
                    security_group["GroupName"])
        ec2.delete_security_group(GroupId=security_group["GroupId"])
    # Delete internet gateway, also safe because our subnets are gone.
    # NOTE(review): gateways are listed a second time here even though an
    # unreferenced gateway was already deleted above — presumably to catch
    # gateways that still had routes at that point; confirm the double pass
    # is intended.
    igws = ec2.describe_internet_gateways(
        Filters=[{
            'Name': 'attachment.vpc-id',
            'Values': [network.network_id]
        }])
    logger.info("Deleting internet gateways: %s", igws)
    for igw in igws["InternetGateways"]:
        logger.info("Deleting internet gateway: %s", igw)
        igw_id = igw["InternetGatewayId"]
        ec2.detach_internet_gateway(InternetGatewayId=igw_id,
                                    VpcId=network.network_id)
        ec2.delete_internet_gateway(InternetGatewayId=igw_id)
    # Now, actually delete the VPC
    try:
        deletion_result = ec2.delete_vpc(VpcId=network.network_id)
    except ec2.exceptions.ClientError as client_error:
        # Log the dependency-violation case specifically, then re-raise.
        if client_error.response['Error']['Code'] == 'DependencyViolation':
            logger.info("Dependency violation deleting VPC: %s",
                        client_error)
        raise client_error
    return deletion_result
def _extract_service_info(self, source, destination, port):
    """
    Helper to extract the necessary information from the source and
    destination arguments.

    Returns a tuple of
    (dest_sg_id, src_sg_id, dest_ip_permissions, src_ip_permissions),
    where each *_sg_id is None for CidrBlock endpoints and each permission
    list contains at most one EC2 IpPermissions entry for TCP on *port*.
    """
    if (not isinstance(source, Service)
            and not isinstance(destination, Service)
            and not isinstance(source, CidrBlock)
            and not isinstance(destination, CidrBlock)):
        raise DisallowedOperationException(
            "Source and destination can only be a cloudless.types.networking.Service object or "
            "a cloudless.types.networking.CidrBlock object")
    if not isinstance(destination, Service) and not isinstance(
            source, Service):
        raise DisallowedOperationException(
            "Either destination or source must be a cloudless.types.networking.Service object"
        )
    if (isinstance(source, Service) and isinstance(destination, Service)
            and source.network != destination.network):
        raise DisallowedOperationException(
            "Destination and source must be in the same network if specified as services"
        )

    def describe_endpoint(endpoint):
        # Return (security_group_id, ip_permission) for one endpoint; the
        # group id is only meaningful for Service endpoints.
        if isinstance(endpoint, Service):
            group_id = self.asg.get_launch_configuration_security_group(
                endpoint.network.name, endpoint.name)
            return group_id, {
                'FromPort': port,
                'ToPort': port,
                'IpProtocol': 'tcp',
                'UserIdGroupPairs': [{
                    'GroupId': group_id
                }]
            }
        if isinstance(endpoint, CidrBlock):
            return None, {
                'FromPort': port,
                'ToPort': port,
                'IpProtocol': 'tcp',
                'IpRanges': [{
                    'CidrIp': str(endpoint.cidr_block)
                }]
            }
        return None, None

    src_sg_id, src_permission = describe_endpoint(source)
    dest_sg_id, dest_permission = describe_endpoint(destination)
    src_ip_permissions = [src_permission] if src_permission else []
    dest_ip_permissions = [dest_permission] if dest_permission else []
    return dest_sg_id, src_sg_id, dest_ip_permissions, src_ip_permissions
def setup(client, config):
    """
    Create all the boilerplate to spin up the service, and the service itself.

    Creates a fresh test network and SSH keypair, runs the blueprint
    fixture's pre/post hooks, deploys the service under test, and opens
    SSH access to it.  State is saved incrementally so a failure leaves a
    recoverable record.

    Returns (service, ssh_username, private_key_path).  Raises
    DisallowedOperationException if prior state exists or the fixture
    misbehaves.
    """
    logger.debug("Running setup to test: %s", config)
    config_obj = BlueprintTestConfiguration(config)
    state = get_state(config_obj)
    if state:
        raise DisallowedOperationException("Found non empty state file: %s" %
                                           state)
    network_name = generate_unique_name("test-network")
    service_name = generate_unique_name("test-service")
    key_pair = generate_ssh_keypair()
    state = {
        "network_name": network_name,
        "service_name": service_name,
        "public_key": key_pair.public_key,
        "private_key": key_pair.private_key
    }
    logger.debug("Saving state: %s now in case something fails", state)
    save_state(state, config_obj)
    save_key_pair(key_pair, config_obj)
    logger.debug("Creating test network: %s", network_name)
    network = client.network.create(network_name, NETWORK_BLUEPRINT)
    logger.debug("Calling the pre service setup in test fixture")
    blueprint_tester = get_blueprint_tester(
        client, config_obj.get_config_dir(),
        config_obj.get_create_fixture_type(),
        config_obj.get_create_fixture_options())
    setup_info = blueprint_tester.setup_before_tested_service(network)
    if not isinstance(setup_info, SetupInfo):
        raise DisallowedOperationException(
            "Test fixture must return cloudless.testutils.fixture.SetupInfo object!"
            "Found: %s" % setup_info)
    state["setup_info"] = {
        "deployment_info": setup_info.deployment_info,
        "blueprint_vars": setup_info.blueprint_vars,
    }
    # NOTE(review): the SSH username appears masked ("******") in this copy
    # of the source — confirm the intended value.
    state["ssh_username"] = "******"
    logger.debug("Saving full state: %s", state)
    save_state(state, config_obj)
    # Add SSH key to the instance using reserved variables
    if "cloudless_test_framework_ssh_key" in setup_info.blueprint_vars:
        raise DisallowedOperationException(
            "cloudless_test_framework_ssh_key is a parameter reserved by the test framework "
            "and cannot be returned by the test fixture. Found: %s" %
            (setup_info.blueprint_vars))
    setup_info.blueprint_vars[
        "cloudless_test_framework_ssh_key"] = key_pair.public_key
    if "cloudless_test_framework_ssh_username" in setup_info.blueprint_vars:
        raise DisallowedOperationException(
            "cloudless_test_framework_ssh_username is a parameter reserved by the test "
            "framework and cannot be returned by the test fixture. Found: %s"
            % (setup_info.blueprint_vars))
    setup_info.blueprint_vars["cloudless_test_framework_ssh_username"] = state[
        "ssh_username"]
    logger.debug("Creating services using the blueprint under test")
    service = client.service.create(network,
                                    service_name,
                                    config_obj.get_blueprint_path(),
                                    setup_info.blueprint_vars,
                                    count=config_obj.get_count())
    logger.debug("Calling the post service setup in test fixture")
    blueprint_tester.setup_after_tested_service(network, service, setup_info)
    logger.debug("Allowing SSH to test service")
    internet = CidrBlock("0.0.0.0/0")
    client.paths.add(internet, service, 22)
    logger.debug("Test service instances: %s",
                 client.service.get_instances(service))
    return (service, state["ssh_username"], private_key_path(config_obj))
def _save(self, mock=False):
    """
    Save: Saves the image after it is configured and checked (must run after
    configure/check).

    This is internal and not exposed on the command line because you should
    only save an image as part of a full build.  All the other steps here
    are for debugging and development.

    When *mock* is False, first scrubs the temporary build user over SSH so
    the saved image does not contain usable credentials.  Returns the saved
    Image.  Raises DisallowedOperationException on missing state/network/
    service or an unexpected instance count.
    """
    # 1. Load existing configuration.  Load once and reuse the result; the
    # previous code called _load_state() a second time just for the
    # emptiness check, reading the state file twice.
    state = self._load_state()
    if not state:
        raise DisallowedOperationException(
            "Called save but found no state at %s. Call deploy first." %
            (self.state_file))
    logger.debug("Loaded state: %s", state)
    network = self.client.network.get(state["network"])
    if not network:
        raise DisallowedOperationException("Network %s not found!" %
                                           state["network"])
    service = self.client.service.get(network, state["service"])
    if not service:
        raise DisallowedOperationException("Service %s not found!" %
                                           state["service"])
    instances = self.client.service.get_instances(service)
    if not instances:
        raise DisallowedOperationException(
            "No running instances in service %s!" % (state["service"]))
    if len(instances) > 1:
        raise DisallowedOperationException(
            "More than one running instance in service %s!" %
            (state["service"]))
    public_ip = instances[0].public_ip
    # 2. Remove test keys
    if not mock:
        ssh = paramiko.SSHClient()
        # Fix: close the key file after loading it; it was previously
        # opened inline and leaked.
        with open(state["ssh_private_key"]) as key_file:
            ssh_key = paramiko.RSAKey(file_obj=key_file)
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(hostname=public_ip,
                    username=state["ssh_username"],
                    pkey=ssh_key)

        # This removes all permissions from the temporary user's home
        # directory and sets the account to expire immediately. We
        # unfortunately can't completely delete this temporary account
        # because we are currently logged in.
        def try_run(client, cmd):
            logger.info("running '%s'", cmd)
            _, stdout, stderr = client.exec_command(cmd)
            exit_status = stdout.channel.recv_exit_status()
            if exit_status:
                raise Exception(
                    "Failed to delete image build user on image: %s. "
                    "Exit code: %s." % (stderr.read(), exit_status))
            logger.debug("Stdout: %s", stdout.read())

        try_run(ssh, 'cd /tmp')
        try_run(ssh, 'sudo chmod -R 000 /home/%s/' % state["ssh_username"])
        try_run(ssh,
                'sudo usermod --expiredate 1 %s' % state["ssh_username"])
        logger.info("Deleted test user: %s", state["ssh_username"])
        ssh.close()
    # 3. Save the image with the correct name
    logger.debug("Saving service %s with name: %s", service.name,
                 self.config.get_image_name())
    image = self.client.image.create(self.config.get_image_name(), service)
    logger.info("Image saved with name: %s", self.config.get_image_name())
    return image
def create(self, network, service_name, blueprint, template_vars, count):
    """
    Create a service in "network" named "service_name" with blueprint file at
    "blueprint".

    Creates the backing subnetwork, then launches instances round-robin
    across the blueprint's availability zones.  When *count* is falsy the
    instance count defaults to the availability zone count.  Returns the
    newly created service via self.get.

    Raises DisallowedOperationException when too few availability zones are
    available or the blueprint's image specifier matches zero or multiple
    images.
    """
    logger.debug(
        'Creating service %s, %s with blueprint %s and '
        'template_vars %s', network.name, service_name, blueprint,
        template_vars)
    self.subnetwork.create(network.name, service_name, blueprint=blueprint)
    instances_blueprint = ServiceBlueprint.from_file(blueprint)
    az_count = instances_blueprint.availability_zone_count()
    availability_zones = list(
        itertools.islice(self._get_availability_zones(), az_count))
    if len(availability_zones) < az_count:
        raise DisallowedOperationException(
            "Do not have %s availability zones: %s" %
            (az_count, availability_zones))
    # Default to one instance per availability zone unless overridden.
    instance_count = az_count
    if count:
        instance_count = count

    def get_image(image_specifier):
        # Resolve the blueprint's image regex to exactly one driver image.
        images = [
            image for image in self.driver.list_images()
            if re.match(image_specifier, image.name)
        ]
        if not images:
            raise DisallowedOperationException(
                "Could not find image named %s" % image_specifier)
        if len(images) > 1:
            raise DisallowedOperationException(
                "Found multiple images for specifier %s: %s" %
                (image_specifier, images))
        return images[0]

    image = get_image(instances_blueprint.image())
    instance_type = get_fitting_instance(self, instances_blueprint)
    # Cycle through the zones so instances spread evenly across them.
    for availability_zone, instance_num in zip(
            itertools.cycle(availability_zones), range(0, instance_count)):
        full_subnetwork_name = "%s-%s" % (network.name, service_name)
        instance_name = "%s-%s" % (full_subnetwork_name, instance_num)
        # Startup script plus network/subnetwork tags are passed to the
        # node as GCE metadata.
        metadata = [{
            "key": "startup-script",
            "value": instances_blueprint.runtime_scripts(template_vars)
        }, {
            "key": "network",
            "value": network.name
        }, {
            "key": "subnetwork",
            "value": service_name
        }]
        logger.info('Creating instance %s in zone %s', instance_name,
                    availability_zone.name)
        self.driver.create_node(instance_name,
                                instance_type,
                                image,
                                location=availability_zone,
                                ex_network=network.name,
                                ex_subnetwork=full_subnetwork_name,
                                external_ip="ephemeral",
                                ex_metadata=metadata,
                                ex_tags=[full_subnetwork_name])
    return self.get(network, service_name)