class BotoBalanceInterface(BalanceInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        #boto.set_stream_logger('foo')
        path = '/services/elb'
        port = 8773
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        if clc_host.endswith('amazonaws.com'):
            clc_host = clc_host.replace('ec2', 'elasticloadbalancing', 1)
            path = '/'
            reg = RegionInfo(endpoint=clc_host)
            port = 443
        self.conn = ELBConnection(access_id, secret_key, region=reg,
                                  port=port, path=path,
                                  is_secure=True, security_token=token, debug=0)
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        with open(name, 'w') as f:
            json.dump(obj, f, cls=BotoJsonBalanceEncoder, indent=2)

    def create_load_balancer(self, name, zones, listeners, subnets=None,
                             security_groups=None, scheme='internet-facing'):
        return self.conn.create_load_balancer(name, zones, listeners, subnets, security_groups, scheme)
    
    def delete_load_balancer(self, name):
        return self.conn.delete_load_balancer(name)

    def get_all_load_balancers(self, load_balancer_names=None):
        obj = self.conn.get_all_load_balancers(load_balancer_names)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/ELB_Balancers.json")
        return obj

    def deregister_instances(self, load_balancer_name, instances):
        return self.conn.deregister_instances(load_balancer_name, instances)

    def register_instances(self, load_balancer_name, instances):
        return self.conn.register_instances(load_balancer_name, instances)

    def create_load_balancer_listeners(self, name, listeners):
        return self.conn.create_load_balancer_listeners(name, listeners)

    def delete_load_balancer_listeners(self, name, ports):
        return self.conn.delete_load_balancer_listeners(name, ports)

    def configure_health_check(self, name, health_check):
        return self.conn.configure_health_check(name, health_check)

    def describe_instance_health(self, load_balancer_name, instances=None):
        obj = self.conn.describe_instance_health(load_balancer_name, instances)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/ELB_Instances.json")
        return obj
class BotoBalanceInterface(BalanceInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        self.access_id = access_id
        self.secret_key = secret_key
        self.token = token
        self.set_endpoint(clc_host)

    def set_endpoint(self, endpoint):
        #boto.set_stream_logger('foo')
        reg = RegionInfo(name='eucalyptus', endpoint=endpoint)
        path = '/services/LoadBalancing'
        port = 8773
        if endpoint.endswith('amazonaws.com'):
            endpoint = endpoint.replace('ec2', 'elasticloadbalancing', 1)
            path = '/'
            reg = RegionInfo(endpoint=endpoint)
            port = 443
        self.conn = ELBConnection(self.access_id, self.secret_key, region=reg,
                                  port=port, path=path,
                                  is_secure=True, security_token=self.token, debug=0)
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        with open(name, 'w') as f:
            json.dump(obj, f, cls=BotoJsonBalanceEncoder, indent=2)

    def create_load_balancer(self, name, zones, listeners, subnets=None,
                             security_groups=None, scheme='internet-facing'):
        return self.conn.create_load_balancer(name, zones, listeners, subnets, security_groups, scheme)

    def delete_load_balancer(self, name):
        return self.conn.delete_load_balancer(name)

    def get_all_load_balancers(self, load_balancer_names=None):
        params = {}
        if load_balancer_names:
            self.conn.build_list_params(params, load_balancer_names,
                                        'LoadBalancerNames.member.%d')
        http_request = self.conn.build_base_http_request('GET', '/', None,
                                                         params, {}, '',
                                                         self.conn.server_name())
        http_request.params['Action'] = 'DescribeLoadBalancers'
        http_request.params['Version'] = self.conn.APIVersion
        response = self.conn._mexe(http_request, override_num_retries=2)
        body = response.read()
        boto.log.debug(body)
        if not body:
            boto.log.error('Null body %s' % body)
            raise self.conn.ResponseError(response.status, response.reason, body)
        elif response.status == 200:
            obj = boto.resultset.ResultSet([('member', boto.ec2.elb.loadbalancer.LoadBalancer)])
            h = boto.handler.XmlHandler(obj, self.conn)
            import xml.sax
            xml.sax.parseString(body, h)
            if self.saveclcdata:
                self.__save_json__(obj, "mockdata/ELB_Balancers.json")
            return obj
        else:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.conn.ResponseError(response.status, response.reason, body)


    def deregister_instances(self, load_balancer_name, instances):
        return self.conn.deregister_instances(load_balancer_name, instances)

    def register_instances(self, load_balancer_name, instances):
        return self.conn.register_instances(load_balancer_name, instances)

    def create_load_balancer_listeners(self, name, listeners):
        return self.conn.create_load_balancer_listeners(name, listeners)

    def delete_load_balancer_listeners(self, name, ports):
        return self.conn.delete_load_balancer_listeners(name, ports)

    def configure_health_check(self, name, health_check):
        return self.conn.configure_health_check(name, health_check)

    def describe_instance_health(self, load_balancer_name, instances=None):
        obj = self.conn.describe_instance_health(load_balancer_name, instances)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/ELB_Instances.json")
        return obj
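
A minimal usage sketch of the interface above, assuming the surrounding eucalyptus console imports (ELBConnection, RegionInfo, BalanceInterface, BotoJsonBalanceEncoder, json) are in scope; the endpoint, credentials, and token below are placeholders:

# Hypothetical usage of BotoBalanceInterface; host and credentials are placeholders.
balance = BotoBalanceInterface('clc.example.com', 'AKIDEXAMPLE',
                               'example-secret-key', token=None)
balance.saveclcdata = True  # also dump each response to mockdata/*.json
for lb in balance.get_all_load_balancers():
    print(balance.describe_instance_health(lb.name))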
Example #3
class EC2System(WrapanapiAPIBase):
    """EC2 Management System, powered by boto

    Wraps the EC2 API and mimics the behavior of other implementors of
    MgmtServiceAPIBase for use in VM control testing

    Instead of username and password, accepts access_key_id and
    secret_access_key, the AWS analogs to those ideas. These are passed, along
    with any kwargs, straight through to boto's EC2 connection factory. This
    allows customization of the EC2 connection, to connect to another region,
    for example.

    For the purposes of the EC2 system, a VM's instance ID is its name because
    EC2 instances don't have to have unique names.

    Args:
        **kwargs: Connection arguments, typically username, password, and region.
    Returns: A :py:class:`EC2System` object.
    """

    _stats_available = {
        'num_vm': lambda self: len(self.list_vm()),
        'num_template': lambda self: len(self.list_template()),
    }

    states = {
        'running': ('running', ),
        'stopped': ('stopped', 'terminated'),
        'suspended': (),
        'deleted': ('terminated', ),
    }

    can_suspend = False

    def __init__(self, **kwargs):
        super(EC2System, self).__init__(kwargs)
        username = kwargs.get('username')
        password = kwargs.get('password')

        regionname = kwargs.get('region')
        region = get_region(kwargs.get('region'))
        self.api = EC2Connection(username, password, region=region)
        self.sqs_connection = connection.SQSConnection(
            username,
            password,
            region=_regions(regionmodule=sqs, regionname=regionname))
        self.elb_connection = ELBConnection(username,
                                            password,
                                            region=_regions(
                                                regionmodule=elb,
                                                regionname=regionname))
        self.s3_connection = boto3.resource(
            's3',
            aws_access_key_id=username,
            aws_secret_access_key=password,
            region_name=regionname,
            config=Config(signature_version='s3v4'))
        self.ec2_connection = boto3.client(
            'ec2',
            aws_access_key_id=username,
            aws_secret_access_key=password,
            region_name=regionname,
            config=Config(signature_version='s3v4'))
        self.stackapi = CloudFormationConnection(
            username,
            password,
            region=_regions(regionmodule=cloudformation,
                            regionname=regionname))
        self.sns_connection = boto3.client('sns', region_name=regionname)
        self.kwargs = kwargs

    def disconnect(self):
        """Disconnect from the EC2 API -- NOOP

        AWS EC2 service is stateless, so there's nothing to disconnect from
        """
        pass

    def info(self):
        """Returns the current versions of boto and the EC2 API being used"""
        return '%s %s' % (boto.UserAgent, self.api.APIVersion)

    def list_vm(self, include_terminated=True):
        """Returns a list from instance IDs currently active on EC2 (not terminated)"""
        instances = None
        if include_terminated:
            instances = [inst for inst in self._get_all_instances()]
        else:
            instances = [
                inst for inst in self._get_all_instances()
                if inst.state != 'terminated'
            ]
        return [i.tags.get('Name', i.id) for i in instances]

    def list_template(self):
        private_images = self.api.get_all_images(
            owners=['self'], filters={'image-type': 'machine'})
        shared_images = self.api.get_all_images(
            executable_by=['self'], filters={'image-type': 'machine'})
        combined_images = list(set(private_images) | set(shared_images))
        # Try to pull the image name (might not exist), falling back on ID (must exist)
        return [i.name or i.id for i in combined_images]

    def list_flavor(self):
        raise NotImplementedError(
            'This function is not supported on this platform.')

    def vm_status(self, instance_id):
        """Returns the status of the requested instance

        Args:
            instance_id: ID of the instance to inspect
        Returns: Instance status.

        See this `page <http://docs.aws.amazon.com/AWSEC2/latest/APIReference/
        ApiReference-ItemType-InstanceStateType.html>`_ for possible return values.

        """
        instance = self._get_instance(instance_id)
        return instance.state

    def vm_type(self, instance_id):
        """Returns the instance type of the requested instance
            e.g. m1.medium, m3.medium etc..

                Args:
                    instance_id: ID of the instance to inspect
                Returns: Instance type.
        """
        instance = self._get_instance(instance_id)
        return instance.instance_type

    def vm_creation_time(self, instance_id):
        instance = self._get_instance(instance_id)
        # Example instance.launch_time: 2014-08-13T22:09:40.000Z
        launch_time = datetime.strptime(instance.launch_time,
                                        '%Y-%m-%dT%H:%M:%S.%fZ')
        # use replace here to make tz-aware. python doesn't handle single 'Z' as UTC
        return launch_time.replace(tzinfo=pytz.UTC)

    def create_vm(self,
                  image_id,
                  min_count=1,
                  max_count=1,
                  instance_type='t1.micro',
                  vm_name=''):
        """
            Creates aws instances.
        TODO:
            Check whether instances were really created.
            Add additional arguments to be able to modify settings for instance creation.
        Args:
            image_id: ID of AMI
            min_count: Minimal count of instances - useful only if creating thousands of instances
            max_count: Maximal count of instances - defaults to 1
            instance_type: Type of instances, catalog of instance types is here:
                https://aws.amazon.com/ec2/instance-types/
                Defaults to 't1.micro' which is the least expensive instance type

            vm_name: Name of instances, can be blank

        Returns:
            List of created aws instances' IDs.
        """
        self.logger.info(
            " Creating instances[%d] with name %s,type %s and image ID: %s ",
            max_count, vm_name, instance_type, image_id)
        try:
            result = self.ec2_connection.run_instances(
                ImageId=image_id,
                MinCount=min_count,
                MaxCount=max_count,
                InstanceType=instance_type,
                TagSpecifications=[
                    {
                        'ResourceType': 'instance',
                        'Tags': [
                            {
                                'Key': 'Name',
                                'Value': vm_name,
                            },
                        ]
                    },
                ])
            instances = result.get('Instances')
            return [instance.get('InstanceId') for instance in instances]
        except Exception:
            self.logger.exception(
                "Create of {} instance failed.".format(vm_name))
            return None

    def delete_vm(self, instance_id):
        """Deletes the an instance

        Args:
            instance_id: ID of the instance to act on
        Returns: Whether or not the backend reports the action completed
        """
        self.logger.info(" Terminating EC2 instance %s" % instance_id)
        instance_id = self._get_instance_id_by_name(instance_id)
        try:
            self.api.terminate_instances([instance_id])
            self._block_until(instance_id, self.states['deleted'])
            return True
        except ActionTimedOutError:
            return False

    def describe_stack(self, stack_name):
        """Describe stackapi

        Returns the description for the specified stack
        Args:
            stack_name: Unique name of stack
        """
        return list(self.stackapi.describe_stacks(stack_name))

    def stack_exist(self, stack_name):
        stacks = [
            stack for stack in self.describe_stack(stack_name)
            if stack.stack_name == stack_name
        ]
        return bool(stacks)

    def delete_stack(self, stack_name):
        """Deletes stack

        Args:
            stack_name: Unique name of stack
        """
        self.logger.info(" Terminating EC2 stack {}".format(stack_name))
        try:
            self.stackapi.delete_stack(stack_name)
            return True
        except ActionTimedOutError:
            return False

    def start_vm(self, instance_id):
        """Start an instance

        Args:
            instance_id: ID of the instance to act on
        Returns: Whether or not the backend reports the action completed
        """
        self.logger.info(" Starting EC2 instance %s" % instance_id)
        instance_id = self._get_instance_id_by_name(instance_id)
        try:
            self.api.start_instances([instance_id])
            self._block_until(instance_id, self.states['running'])
            return True
        except ActionTimedOutError:
            return False

    def stop_vm(self, instance_id):
        """Stop an instance

        Args:
            instance_id: ID of the instance to act on
        Returns: Whether or not the backend reports the action completed
        """
        self.logger.info(" Stopping EC2 instance %s" % instance_id)
        instance_id = self._get_instance_id_by_name(instance_id)
        try:
            self.api.stop_instances([instance_id])
            self._block_until(instance_id, self.states['stopped'], timeout=360)
            return True
        except ActionTimedOutError:
            return False

    def restart_vm(self, instance_id):
        """Restart an instance

        Args:
            instance_id: ID of the instance to act on
        Returns: Whether or not the backend reports the action completed

        The action is taken in two separate calls to EC2. A 'False' return can
        indicate a failure of either the stop action or the start action.

        Note: There is a reboot_instances call available on the API, but it provides
            less insight than blocking on stop_vm and start_vm. Furthermore,
            there is no "rebooting" state, so there are potential monitoring
            issues that are avoided by completing these steps atomically
        """
        self.logger.info(" Restarting EC2 instance %s" % instance_id)
        return self.stop_vm(instance_id) and self.start_vm(instance_id)

    def is_vm_state(self, instance_id, state):
        return self.vm_status(instance_id) in state

    def is_vm_running(self, instance_id):
        """Is the VM running?

        Args:
            instance_id: ID of the instance to inspect
        Returns: Whether or not the requested instance is running
        """
        try:
            running = self.vm_status(instance_id) in self.states['running']
            return running
        except Exception:
            return False

    def wait_vm_running(self, instance_id, num_sec=360):
        self.logger.info(
            " Waiting for EC2 instance %s to change status to running" %
            instance_id)
        wait_for(self.is_vm_running, [instance_id], num_sec=num_sec)

    def is_vm_stopped(self, instance_id):
        """Is the VM stopped?

        Args:
            instance_id: ID of the instance to inspect
        Returns: Whether or not the requested instance is stopped
        """
        return self.vm_status(instance_id) in self.states['stopped']

    def wait_vm_stopped(self, instance_id, num_sec=360):
        self.logger.info(
            " Waiting for EC2 instance %s to change status to stopped or terminated"
            % instance_id)
        wait_for(self.is_vm_stopped, [instance_id], num_sec=num_sec)

    def suspend_vm(self, instance_id):
        """Suspend a VM: Unsupported by EC2

        Args:
            instance_id: ID of the instance to act on
        Raises:
            ActionNotSupported: The action is not supported on the system
        """
        raise ActionNotSupported()

    def is_vm_suspended(self, instance_id):
        """Is the VM suspended? We'll never know because EC2 don't support this.

        Args:
            instance_id: ID of the instance to inspect
        Raises:
            ActionNotSupported: The action is not supported on the system
        """
        raise ActionNotSupported()

    def wait_vm_suspended(self, instance_id, num_sec):
        """We would wait forever - EC2 doesn't support this.

        Args:
            instance_id: ID of the instance to wait for
        Raises:
            ActionNotSupported: The action is not supported on the system
        """
        raise ActionNotSupported()

    def clone_vm(self, source_name, vm_name):
        raise NotImplementedError(
            'This function has not yet been implemented.')

    def deploy_template(self, template, *args, **kwargs):
        """Instantiate the requested template image (ami id)

        Accepts args/kwargs from boto's
        :py:meth:`run_instances<boto:boto.ec2.connection.EC2Connection.run_instances>` method

        Most important args are listed below.

        Args:
            template: Template name (AMI ID) to instantiate
            vm_name: Name of the instance (Name tag to set)
            instance_type: Type (flavor) of the instance

        Returns: Instance ID of the created instance

        Note: min_count and max_count args will be forced to '1'; if you're trying to do
              anything fancier than that, you might be in the wrong place

        """
        # Enforce create_vm only creating one VM
        self.logger.info(" Deploying EC2 template %s" % template)

        # strip out kwargs that ec2 doesn't understand
        timeout = kwargs.pop('timeout', 900)
        vm_name = kwargs.pop('vm_name', None)
        power_on = kwargs.pop('power_on', True)

        # Make sure we only provision one VM
        kwargs.update({'min_count': 1, 'max_count': 1})

        # sanity-check inputs
        if 'instance_type' not in kwargs:
            kwargs['instance_type'] = 'm1.small'
        if not template.startswith('ami'):
            # assume this is a lookup by name, get the ami id
            template = self._get_ami_id_by_name(template)

        # clone!
        reservation = self.api.run_instances(template, *args, **kwargs)
        instances = self._get_instances_from_reservations([reservation])
        # Should have only made one VM; return its ID for use in other methods
        self.wait_vm_running(instances[0].id, num_sec=timeout)

        if vm_name:
            self.set_name(instances[0].id, vm_name)
        if power_on:
            self.start_vm(instances[0].id)
        return instances[0].id
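    # Hypothetical usage of deploy_template above: deploy_template('ami-12345678',
    # vm_name='test-vm', instance_type='m1.small') blocks until the instance is
    # running, tags it with the given name, and returns its instance ID
    # (all values shown are placeholders).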

    def set_name(self, instance_id, new_name):
        self.logger.info("Setting name of EC2 instance %s to %s" %
                         (instance_id, new_name))
        instance = self._get_instance(instance_id)
        instance.add_tag('Name', new_name)
        return new_name

    def get_name(self, instance_id):
        return self._get_instance(instance_id).tags.get('Name', instance_id)

    def _get_instance(self, instance_id):
        instance_id = self._get_instance_id_by_name(instance_id)
        reservations = self.api.get_all_instances([instance_id])
        instances = self._get_instances_from_reservations(reservations)
        if len(instances) > 1:
            raise MultipleInstancesError

        try:
            return instances[0]
        except IndexError:
            return None

    def current_ip_address(self, instance_id):
        return str(self._get_instance(instance_id).ip_address)

    def get_ip_address(self, instance_id, **kwargs):
        return self.current_ip_address(instance_id)

    def _get_instance_id_by_name(self, instance_name):
        # Quick validation that the instance name isn't actually an ID
        # If people start naming their instances in such a way as to break this
        # check, that would be silly, but we can tighten the regex if necessary.
        pattern = re.compile(r'^i-\w{8,17}$')
        if pattern.match(instance_name):
            return instance_name

        # Filter by the 'Name' tag
        filters = {
            'tag:Name': instance_name,
        }
        reservations = self.api.get_all_instances(filters=filters)
        instances = self._get_instances_from_reservations(reservations)
        if not instances:
            raise VMInstanceNotFound(instance_name)
        elif len(instances) > 1:
            raise MultipleInstancesError('Instance name "%s" is not unique' %
                                         instance_name)

        # We have an instance! return its ID
        return instances[0].id

    def _get_ami_id_by_name(self, image_name):
        matches = self.api.get_all_images(filters={'name': image_name})
        if not matches:
            raise ImageNotFoundError(image_name)
        elif len(matches) > 1:
            raise MultipleImagesError(
                'Template name %s returned more than one image. '
                'Use the ami-ID or remove duplicates from EC2' % image_name)

        return matches[0].id

    def does_vm_exist(self, name):
        try:
            self._get_instance_id_by_name(name)
            return True
        except MultipleInstancesError:
            return True
        except VMInstanceNotFound:
            return False

    def _get_instances_from_reservations(self, reservations):
        """Takes a sequence of reservations and returns their instances"""
        instances = list()
        for reservation in reservations:
            for instance in reservation.instances:
                instances.append(instance)
        return instances

    def _get_all_instances(self):
        """Gets all instances that EC2 can see"""
        reservations = self.api.get_all_instances()
        instances = self._get_instances_from_reservations(reservations)
        return instances

    def _block_until(self, instance_id, expected, timeout=90):
        """Blocks until the given instance is in one of the expected states

        Takes an optional timeout value.
        """
        wait_for(lambda: self.vm_status(instance_id) in expected,
                 num_sec=timeout)

    def remove_host_from_cluster(self, hostname):
        raise NotImplementedError('remove_host_from_cluster not implemented')

    def create_s3_bucket(self, bucket_name):
        self.logger.info("Creating bucket: {}".format(bucket_name))
        try:
            self.s3_connection.create_bucket(Bucket=bucket_name,
                                             CreateBucketConfiguration={
                                                 'LocationConstraint':
                                                 self.kwargs.get('region')
                                             })
            self.logger.info("Success: Bucket was successfully created.")
            return True
        except Exception:
            self.logger.exception(
                "Error: Bucket was not successfully created.")
            return False

    def upload_file_to_s3_bucket(self, bucket_name, file_path, file_name):
        bucket = self.s3_connection.Bucket(bucket_name)
        self.logger.info("uploading file {} to bucket: {}".format(
            file_path, bucket_name))
        if os.path.isfile(file_path):
            try:
                bucket.upload_file(file_path, file_name)
                self.logger.info("Success: uploading file completed")
                return True
            except Exception:
                self.logger.exception("File upload failed.")
                return False
        else:
            self.logger.error("Error: File to upload does not exist.")
            return False

    def object_exists_in_bucket(self, bucket_name, object_key):
        bucket = self.s3_connection.Bucket(name=bucket_name)
        return any(o.key == object_key for o in bucket.objects.all())

    def delete_s3_bucket(self, bucket_name):
        """TODO: Force delete - delete all objects and then bucket"""
        bucket = self.s3_connection.Bucket(bucket_name)
        self.logger.info("Trying to delete bucket {}".format(bucket_name))
        try:
            bucket.delete()
            self.logger.info(
                "Success: bucket {} was deleted.".format(bucket_name))
            return True
        except Exception:
            self.logger.exception(
                "Bucket {} deletion failed".format(bucket_name))
            return False

    def delete_objects_from_s3_bucket(self, bucket_name, object_keys):
        """Delete each of the given object_keys from the given bucket"""
        if not isinstance(object_keys, list):
            raise ValueError(
                "object_keys argument must be a list of key strings")
        bucket = self.s3_connection.Bucket(name=bucket_name)
        try:
            bucket.delete_objects(Delete={
                'Objects': [{
                    'Key': object_key
                } for object_key in object_keys]
            })
            return True
        except Exception:
            self.logger.exception(
                'Deleting object keys {} from Bucket "{}" failed'.format(
                    object_keys, bucket_name))
            return False

    def get_all_disassociated_addresses(self):
        return [
            addr for addr in self.api.get_all_addresses()
            if not addr.instance_id and not addr.network_interface_id
        ]

    def release_vpc_address(self, alloc_id):
        self.logger.info(" Releasing EC2 VPC EIP {}".format(str(alloc_id)))
        try:
            self.api.release_address(allocation_id=alloc_id)
            return True

        except ActionTimedOutError:
            return False

    def release_address(self, address):
        self.logger.info(" Releasing EC2-CLASSIC EIP {}".format(address))
        try:
            self.api.release_address(public_ip=address)
            return True

        except ActionTimedOutError:
            return False

    def get_all_unattached_volumes(self):
        return [
            volume for volume in self.api.get_all_volumes()
            if not volume.attach_data.status
        ]

    def delete_sqs_queue(self, queue_name):
        self.logger.info(" Deleting SQS queue {}".format(queue_name))
        try:
            queue = self.sqs_connection.get_queue(queue_name=queue_name)
            if queue:
                self.sqs_connection.delete_queue(queue=queue)
                return True
            else:
                return False

        except ActionTimedOutError:
            return False

    def get_all_unused_loadbalancers(self):
        return [
            loadbalancer
            for loadbalancer in self.elb_connection.get_all_load_balancers()
            if not loadbalancer.instances
        ]

    def delete_loadbalancer(self, loadbalancer):
        self.logger.info(" Deleting Elastic Load Balancer {}".format(
            loadbalancer.name))
        try:
            self.elb_connection.delete_load_balancer(loadbalancer.name)
            return True

        except ActionTimedOutError:
            return False

    def get_all_unused_network_interfaces(self):
        return [
            eni for eni in self.api.get_all_network_interfaces()
            if eni.status == "available"
        ]

    def import_image(self, s3bucket, s3key, format="vhd", description=None):
        self.logger.info(
            " Importing image %s from bucket %s with description %s in %s format",
            s3key, s3bucket, description, format)
        try:
            result = self.ec2_connection.import_image(DiskContainers=[{
                'Description': description if description is not None else s3key,
                'Format': format,
                'UserBucket': {'S3Bucket': s3bucket, 'S3Key': s3key}
            }])
            task_id = result.get("ImportTaskId")
            return task_id

        except Exception:
            self.logger.exception("Import of {} image failed.".format(s3key))
            return False

    def get_import_image_task(self, task_id):
        result = self.ec2_connection.describe_import_image_tasks(
            ImportTaskIds=[task_id])
        result_task = result.get("ImportImageTasks")
        return result_task[0]

    def get_image_id_if_import_completed(self, task_id):
        result = self.get_import_image_task(task_id)
        result_status = result.get("Status")
        if result_status == 'completed':
            return result.get("ImageId")
        else:
            return False

    def copy_image(self, source_region, source_image, image_id):
        self.logger.info(
            " Copying image %s from region %s to region %s with image id %s",
            source_image, source_region, self.kwargs.get('region'), image_id)
        try:
            self.ec2_connection.copy_image(SourceRegion=source_region,
                                           SourceImageId=source_image,
                                           Name=image_id)
            return True

        except Exception:
            self.logger.exception(
                "Copy of {} image failed.".format(source_image))
            return False

    def deregister_image(self, image_id, delete_snapshot=True):
        """Deregister the given AMI ID, only valid for self owned AMI's"""
        images = self.api.get_all_images(owners=['self'],
                                         filters={'image-type': 'machine'})
        matching_images = [image for image in images if image.id == image_id]

        try:
            for image in matching_images:
                image.deregister(delete_snapshot=delete_snapshot)
            return True
        except Exception:
            self.logger.exception(
                'Deregister of image_id {} failed'.format(image_id))
            return False

    def list_topics(self):
        return self.sns_connection.list_topics()

    def get_arn_if_topic_exists(self, topic_name):
        topics = self.list_topics()

        # There is no way to get topic_name, so it
        # has to be parsed from ARN, which looks
        # like this: arn:aws:sns:sa-east-1:ACCOUNT_NUM:AWSConfig_topic

        topic_found = [
            t.get('TopicArn') for t in topics.get('Topics')
            if t.get('TopicArn').split(':')[-1] == topic_name
        ]
        if topic_found:
            return topic_found[0]
        else:
            return False

    def delete_topic(self, arn):
        self.logger.info(" Deleting SNS Topic {} ".format(arn))
        try:
            self.sns_connection.delete_topic(TopicArn=arn)
            return True

        except Exception:
            self.logger.exception("Delete of {} topic failed.".format(arn))
            return False

    def volume_exists_and_available(self, volume_name=None, volume_id=None):
        """
        Method for checking existence and availability state for volume

        Args:
            volume_name: Name of volume, if not set volume_id must be set
            volume_id: ID of volume in format vol-random_chars, if not set volume_name must be set

        Returns:
            True if volume exists and is available.
            False if volume doesn't exist or is not available.
        """
        if volume_id:
            try:
                response = self.ec2_connection.describe_volumes(
                    VolumeIds=[volume_id],
                    Filters=[{
                        'Name': 'status',
                        'Values': ['available']
                    }])
                if response.get('Volumes'):
                    return True
                else:
                    return False
            except Exception:
                return False
        elif volume_name:
            response = self.ec2_connection.describe_volumes(
                Filters=[{
                    'Name': 'status',
                    'Values': ['available']
                }, {
                    'Name': 'tag:Name',
                    'Values': [volume_name]
                }])
            if response.get('Volumes'):
                return True
            else:
                return False
        else:
            raise TypeError(
                "Neither volume_name nor volume_id were specified.")

    def snapshot_exists(self, snapshot_name=None, snapshot_id=None):
        """
        Method for checking existence of snapshot.

        Args:
            snapshot_name: Name of snapshot, if not set snapshot_id must be set.
            snapshot_id: Id of snapshot in format snap-random_chars, if not set snapshot_name
            must be set.

        Returns:
            True if snapshot exists.
            False if snapshot doesn't exist.
        """
        if snapshot_id:
            try:
                response = self.ec2_connection.describe_snapshots(
                    SnapshotIds=[snapshot_id])
                if response.get('Snapshots'):
                    return True
                else:
                    return False
            except Exception:
                return False
        elif snapshot_name:
            response = self.ec2_connection.describe_snapshots(
                Filters=[{
                    'Name': 'tag:Name',
                    'Values': [snapshot_name]
                }])
            if response.get('Snapshots'):
                return True
            else:
                return False
        else:
            raise TypeError(
                "Neither snapshot_name nor snapshot_id were specified.")

    def copy_snapshot(self, source_snapshot_id, source_region=None):
        """
        This method does not work properly because of a bug in boto3:
        it creates a new snapshot with an empty size and an error.
        Args:
            source_snapshot_id: Id of source snapshot in format snap-random_chars
            source_region: Source region, if not set then ec2_connection region

        Returns:
            True when snapshot copy started successfully.
            False when snapshot copy didn't start.
        """
        if not source_region:
            source_region = self.kwargs.get('region')
        try:
            self.ec2_connection.copy_snapshot(
                SourceRegion=source_region,
                SourceSnapshotId=source_snapshot_id,
                DestinationRegion=source_region)
            return True
        except Exception:
            self.logger.exception(
                "Copy snapshot with id {} failed.".format(source_snapshot_id))
            return False
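
A brief, hypothetical driver for the EC2System wrapper above; the credentials and region are placeholders and the keyword arguments follow the class docstring:

# Hypothetical usage of EC2System; credentials and region are placeholders.
system = EC2System(username='AKIDEXAMPLE', password='example-secret-key',
                   region='us-east-1')
print(system.info())  # boto UserAgent plus the EC2 API version in use
for name in system.list_vm(include_terminated=False):
    print("{}: {}".format(name, system.vm_status(name)))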
Example #4
class MSBManager:
    def __init__(self, aws_access_key, aws_secret_key):
        self.ec2_conn = EC2Connection(aws_access_key, aws_secret_key)
        self.elb_conn = ELBConnection(aws_access_key, aws_secret_key)
        self.auto_scale_conn = AutoScaleConnection(aws_access_key, aws_secret_key)
        self.cloud_watch_conn = CloudWatchConnection(aws_access_key, aws_secret_key)
        self.default_cooldown = 60

    def get_security_group(self, name):
        sgs = [g for g in self.ec2_conn.get_all_security_groups() if g.name == name]
        return sgs[0] if sgs else None

    def create_security_group(self, name, description):
        sgs = [g for g in self.ec2_conn.get_all_security_groups() if g.name == name]
        sg = sgs[0] if sgs else None
        if not sgs:
            sg = self.ec2_conn.create_security_group(name, description)

        try:
            sg.authorize(ip_protocol="-1", from_port=None, to_port=None, cidr_ip="0.0.0.0/0", dry_run=False)
        except EC2ResponseError:
            pass
        return sg

    def remove_security_group(self, name):
        self.ec2_conn.delete_security_group(name=name)

    def create_instance(self, image, instance_type, key_name, zone, security_groups, tags):
        instance = None
        reservations = self.ec2_conn.get_all_instances()
        for reservation in reservations:
            for i in reservation.instances:
                if "Name" in i.tags and i.tags["Name"] == tags["Name"] and i.state == "running":
                    instance = i
                    break

        if not instance:
            reservation = self.ec2_conn.run_instances(
                image,
                instance_type=instance_type,
                key_name=key_name,
                placement=zone,
                security_groups=security_groups,
                monitoring_enabled=True,
            )
            instance = reservation.instances[0]
            while instance.update() != "running":
                time.sleep(5)
            time.sleep(10)
            self.ec2_conn.create_tags([instance.id], tags)

        return instance

    def request_spot_instance(self, bid, image, instance_type, key_name, zone, security_groups, tags):
        req = self.ec2_conn.request_spot_instances(
            price=bid,
            instance_type=instance_type,
            image_id=image,
            placement=zone,
            key_name=key_name,
            security_groups=security_groups,
        )
        instance_id = None

        while not instance_id:
            job_sir_id = req[0].id
            requests = self.ec2_conn.get_all_spot_instance_requests()
            for sir in requests:
                if sir.id == job_sir_id:
                    instance_id = sir.instance_id
                    break
            print "Job {} not ready".format(job_sir_id)
            time.sleep(60)

        self.ec2_conn.create_tags([instance_id], tags)

    def remove_instance(self, instance_id):
        self.remove_instances([instance_id])

    def remove_instances(self, instance_ids):
        self.ec2_conn.terminate_instances(instance_ids)

    def remove_instance_by_tag_name(self, name):
        reservations = self.ec2_conn.get_all_instances()
        data_center_instance_ids = []
        for reservation in reservations:
            for instance in reservation.instances:
                if "Name" in instance.tags and instance.tags["Name"] == name and instance.state == "running":
                    data_center_instance_ids.append(instance.id)
        if data_center_instance_ids:
            self.remove_instances(data_center_instance_ids)

    def create_elb(self, name, zone, project_tag_value, security_group_id, instance_ids=None):
        lbs = [l for l in self.elb_conn.get_all_load_balancers() if l.name == name]
        lb = lbs[0] if lbs else None
        if not lb:
            hc = HealthCheck(
                timeout=50, interval=60, healthy_threshold=2, unhealthy_threshold=8, target="HTTP:80/heartbeat"
            )
            ports = [(80, 80, "http")]
            zones = [zone]
            lb = self.elb_conn.create_load_balancer(name, zones, ports)

            self.elb_conn.apply_security_groups_to_lb(name, [security_group_id])
            lb.configure_health_check(hc)
            if instance_ids:
                lb.register_instances(instance_ids)

            params = {
                "LoadBalancerNames.member.1": lb.name,
                "Tags.member.1.Key": "15619project",
                "Tags.member.1.Value": project_tag_value,
            }
            lb.connection.get_status("AddTags", params, verb="POST")
        return lb

    def remove_elb(self, name):
        self.elb_conn.delete_load_balancer(name)

    def create_launch_configuration(self, name, image, key_name, security_groups, instance_type):
        lcs = [l for l in self.auto_scale_conn.get_all_launch_configurations() if l.name == name]
        lc = lcs[0] if lcs else None
        if not lc:
            lc = LaunchConfiguration(
                name=name,
                image_id=image,
                key_name=key_name,
                security_groups=[security_groups],
                instance_type=instance_type,
            )
            self.auto_scale_conn.create_launch_configuration(lc)
        return lc

    def remove_launch_configuration(self, name):
        self.auto_scale_conn.delete_launch_configuration(name)

    def create_autoscaling_group(self, name, lb_name, zone, tags, launch_config, instance_ids=None):
        lc = launch_config
        as_groups = [a for a in self.auto_scale_conn.get_all_groups() if a.name == name]
        as_group = as_groups[0] if as_groups else None
        if not as_group:
            as_group = AutoScalingGroup(
                group_name=name,
                load_balancers=[lb_name],
                availability_zones=[zone],
                launch_config=lc,
                min_size=4,
                max_size=4,
                health_check_type="ELB",
                health_check_period=120,
                connection=self.auto_scale_conn,
                default_cooldown=self.default_cooldown,
                desired_capacity=4,
                tags=tags,
            )

            self.auto_scale_conn.create_auto_scaling_group(as_group)
            if instance_ids:
                self.auto_scale_conn.attach_instances(name, instance_ids)

            scale_up_policy = ScalingPolicy(
                name="scale_up",
                adjustment_type="ChangeInCapacity",
                as_name=name,
                scaling_adjustment=1,
                cooldown=self.default_cooldown,
            )
            scale_down_policy = ScalingPolicy(
                name="scale_down",
                adjustment_type="ChangeInCapacity",
                as_name=name,
                scaling_adjustment=-1,
                cooldown=self.default_cooldown,
            )

            self.auto_scale_conn.create_scaling_policy(scale_up_policy)
            self.auto_scale_conn.create_scaling_policy(scale_down_policy)

            scale_up_policy = self.auto_scale_conn.get_all_policies(as_group=name, policy_names=["scale_up"])[0]
            scale_down_policy = self.auto_scale_conn.get_all_policies(as_group=name, policy_names=["scale_down"])[0]

            alarm_dimensions = {"AutoScalingGroupName": name}
            scale_up_alarm = MetricAlarm(
                name="scale_up_on_cpu",
                namespace="AWS/EC2",
                metric="CPUUtilization",
                statistic="Average",
                comparison=">",
                threshold=85,
                period=60,
                evaluation_periods=1,
                alarm_actions=[scale_up_policy.policy_arn],
                dimensions=alarm_dimensions,
            )
            self.cloud_watch_conn.create_alarm(scale_up_alarm)
            scale_down_alarm = MetricAlarm(
                name="scale_down_on_cpu",
                namespace="AWS/EC2",
                metric="CPUUtilization",
                statistic="Average",
                comparison="<",
                threshold=60,
                period=60,
                evaluation_periods=1,
                alarm_actions=[scale_down_policy.policy_arn],
                dimensions=alarm_dimensions,
            )
            self.cloud_watch_conn.create_alarm(scale_down_alarm)

        return as_group

    def update_autoscaling_group_max_size(self, as_group, max_size):
        as_group.max_size = max_size
        as_group.update()

    def update_autoscaling_group_min_size(self, as_group, min_size):
        as_group.min_size = min_size
        as_group.update()

    def remove_autoscaling_group(self, name):
        self.auto_scale_conn.delete_auto_scaling_group(name)
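
A short, hypothetical driver for MSBManager above; the keys, AMI ID, key pair, and zone below are placeholders:

# Hypothetical usage of MSBManager; keys, AMI, key pair, and zone are placeholders.
manager = MSBManager('AKIDEXAMPLE', 'example-secret-key')
sg = manager.create_security_group('msb-demo', 'demo security group')
instance = manager.create_instance('ami-12345678', 'm3.medium', 'demo-keypair',
                                   'us-east-1a', [sg.name], {'Name': 'msb-demo'})
lb = manager.create_elb('msb-demo-elb', 'us-east-1a', 'demo-tag', sg.id,
                        instance_ids=[instance.id])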
Example #5
class EC2System(System, VmMixin, TemplateMixin, StackMixin):
    """EC2 Management System, powered by boto

    Wraps the EC2 API

    Instead of username and password, accepts access_key_id and
    secret_access_key, the AWS analogs to those ideas. These are passed, along
    with any kwargs, straight through to boto's EC2 connection factory. This
    allows customization of the EC2 connection, to connect to another region,
    for example.

    For the purposes of the EC2 system, a VM's instance ID is its name because
    EC2 instances don't have to have unique names.

    Args:
        **kwargs: Connection arguments, typically username, password, and region.
    Returns: A :py:class:`EC2System` object.
    """

    _stats_available = {
        'num_vm': lambda self: len(self.list_vms(hide_deleted=False)),
        'num_template': lambda self: len(self.list_templates()),
    }

    can_suspend = False
    can_pause = False

    def __init__(self, **kwargs):
        super(EC2System, self).__init__(**kwargs)
        self._username = kwargs.get('username')
        self._password = kwargs.get('password')
        connection_config = Config(signature_version='s3v4',
                                   retries=dict(max_attempts=10))

        self._region_name = kwargs.get('region')
        self._region = get_region(self._region_name)
        self.api = EC2Connection(self._username,
                                 self._password,
                                 region=self._region)

        self.sqs_connection = _sqs_connection.SQSConnection(
            self._username,
            self._password,
            region=_regions(regionmodule=sqs, regionname=self._region_name))

        self.elb_connection = ELBConnection(self._username,
                                            self._password,
                                            region=_regions(
                                                regionmodule=elb,
                                                regionname=self._region_name))

        self.s3_connection = boto3resource(
            's3',
            aws_access_key_id=self._username,
            aws_secret_access_key=self._password,
            region_name=self._region_name,
            config=connection_config)

        self.ec2_connection = boto3client('ec2',
                                          aws_access_key_id=self._username,
                                          aws_secret_access_key=self._password,
                                          region_name=self._region_name,
                                          config=connection_config)

        self.cloudformation_connection = boto3client(
            'cloudformation',
            aws_access_key_id=self._username,
            aws_secret_access_key=self._password,
            region_name=self._region_name,
            config=connection_config)

        self.sns_connection = boto3client('sns', region_name=self._region_name)

        self.kwargs = kwargs

    @property
    def _identifying_attrs(self):
        return {
            'username': self._username,
            'password': self._password,
            'region': self._region_name
        }

    @property
    def can_suspend(self):
        return False

    @property
    def can_pause(self):
        return False

    def disconnect(self):
        """Disconnect from the EC2 API -- NOOP

        AWS EC2 service is stateless, so there's nothing to disconnect from
        """
        pass

    def info(self):
        """Returns the current versions of boto and the EC2 API being used"""
        return '%s %s' % (UserAgent, self.api.APIVersion)

    def _get_instances(self, **kwargs):
        """
        Gets instance reservations and parses instance objects
        """
        reservations = self.api.get_all_instances(**kwargs)
        instances = list()
        for reservation in reservations:
            for instance in reservation.instances:
                instances.append(EC2Instance(system=self, raw=instance))
        return instances

    @staticmethod
    def _add_filter_for_terminated(kwargs_dict):
        new_filter = {
            'instance-state-name': [
                api_state
                for api_state, vm_state in EC2Instance.state_map.items()
                if vm_state is not VmState.DELETED
            ]
        }
        if 'filters' not in kwargs_dict:
            kwargs_dict['filters'] = new_filter
        else:
            kwargs_dict['filters'].update(new_filter)
        return kwargs_dict

    def find_vms(self, name=None, id=None, filters=None, hide_deleted=True):
        """
        Find instance on ec2 system

        Supported queries include searching by name tag, id, or passing
        in a specific filters dict to the system API. You can only
        select one of these methods.

        Args:
            name (str): name of instance (which is a tag)
            id (str): id of instance
            filters (dict): filters to pass along to system.api.get_all_instances()
            hide_deleted: do not list an instance if it has been terminated

        Returns:
            List of EC2Instance objects that match
        """
        # Validate args
        filled_args = [arg for arg in (
            name,
            id,
            filters,
        ) if arg]
        if not filled_args or len(filled_args) > 1:
            raise ValueError(
                "You must select one of these search methods: name, id, or filters"
            )

        if id:
            kwargs = {'instance_ids': [id]}
        elif filters:
            kwargs = {'filters': filters}
        elif name:
            # Quick validation that the instance name isn't actually an ID
            pattern = re.compile(r'^i-\w{8,17}$')
            if pattern.match(name):
                # Switch to using the id search method
                kwargs = {'instance_ids': [name]}
            else:
                kwargs = {'filters': {'tag:Name': name}}

        if hide_deleted:
            self._add_filter_for_terminated(kwargs)

        instances = self._get_instances(**kwargs)

        return instances

    def get_vm(self, name, hide_deleted=True):
        """
        Get a single EC2Instance with name or id equal to 'name'

        Must be a unique name

        Args:
            name: name or id of instance
        Returns:
            EC2Instance object
        Raises:
            VMInstanceNotFound if no instance exists with this name/id
            MultipleInstancesError if name is not unique
        """
        instances = self.find_vms(name=name, hide_deleted=hide_deleted)
        if not instances:
            raise VMInstanceNotFound(name)
        elif len(instances) > 1:
            raise MultipleInstancesError('Instance name "%s" is not unique' %
                                         name)
        return instances[0]

    def list_vms(self, hide_deleted=True):
        """
        Returns a list of instances currently active on EC2 (not terminated)
        """
        kwargs = {}
        if hide_deleted:
            self._add_filter_for_terminated(kwargs)
        return self._get_instances(**kwargs)

    def create_vm(self,
                  image_id,
                  min_count=1,
                  max_count=1,
                  instance_type='t1.micro',
                  vm_name='',
                  **kwargs):
        """
        Creates aws instances.

        TODO:
            Check whether instances were really created.
            Add additional arguments to be able to modify settings for instance creation.
        Args:
            image_id: ID of AMI
            min_count: Minimal count of instances - useful only if creating thousands of instances
            max_count: Maximal count of instances - defaults to 1
            instance_type: Type of instances, catalog of instance types is here:
                https://aws.amazon.com/ec2/instance-types/
                Defaults to 't1.micro' which is the least expensive instance type

            vm_name: Name of instances, can be blank

        Returns:
            List of EC2Instance objects for all instances created
        """
        self.logger.debug("ec2.create_vm() -- Ignored kwargs: %s", kwargs)
        self.logger.info(
            "Creating instances[%d] with name %s,type %s and image ID: %s ",
            max_count, vm_name, instance_type, image_id)
        try:
            result = self.ec2_connection.run_instances(
                ImageId=image_id,
                MinCount=min_count,
                MaxCount=max_count,
                InstanceType=instance_type,
                TagSpecifications=[
                    {
                        'ResourceType': 'instance',
                        'Tags': [
                            {
                                'Key': 'Name',
                                'Value': vm_name,
                            },
                        ]
                    },
                ])
        except Exception:
            self.logger.exception("Create of instance '%s' failed.", vm_name)
            raise

        try:
            instances_json = result['Instances']
            instance_ids = [entry['InstanceId'] for entry in instances_json]
        except KeyError:
            self.logger.exception(
                "Unable to parse all InstanceId's from response json")
            raise

        instances = [
            EC2Instance(system=self, uuid=uuid) for uuid in instance_ids
        ]
        for instance in instances:
            self.logger.info("Waiting for instance '%s' to reach steady state",
                             instance.uuid)
            instance.wait_for_steady_state()
        if len(instances) == 1:
            return instances[0]
        else:
            return instances
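    # Hypothetical usage of create_vm above: create_vm('ami-12345678',
    # vm_name='test-vm', instance_type='t1.micro') waits for the instance to
    # reach a steady state and returns a single EC2Instance object
    # (the AMI ID and name are placeholders).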

    def list_stacks(self, stack_status_filter=StackStates.ACTIVE):
        """
        Returns a list of Stack objects

        stack_status_filter:  list of stack statuses to filter for. See ``StackStates``
        """
        stack_list = [
            CloudFormationStack(system=self, uuid=stack_summary['StackId'])
            for stack_summary in
            self.cloudformation_connection.list_stacks()['StackSummaries']
            if stack_summary['StackStatus'] in stack_status_filter
        ]
        return stack_list

    def find_stacks(self, name=None, id=None):
        """
        Return list of all stacks with given name or id

        According to boto3 docs, you can use name or ID in these situations:

        "Running stacks: You can specify either the stack's name or its unique stack ID.
        Deleted stacks: You must specify the unique stack ID."

        If 'name' kwarg is given and we fail to locate the stack initially, we will retry with
        'list_stacks' to get the list of all stacks with this name (even if they are deleted)

        If 'id' kwarg is given and we hit an error finding it, we don't call list_stacks. This
        is the more efficient kwarg to use if you are searching specifically by id.

        Args:
            name: name to search for
            id: id to search for
        Returns:
            List of CloudFormationStack objects
        """
        if not name and not id:
            raise ValueError('missing one of required kwargs: name, id')

        if name:
            searching_by_name = True
            name_or_id = name
        elif id:
            searching_by_name = False
            name_or_id = id

        stack_list = []
        try:
            # Try to find by name/id directly by using describe_stacks
            stack_list = [
                CloudFormationStack(system=self,
                                    uuid=stack['StackId'],
                                    raw=stack)
                for stack in self.cloudformation_connection.describe_stacks(
                    StackName=name_or_id)['Stacks']
            ]
        except ClientError as error:
            # Stack not found, if searching by name, look through deleted stacks...
            if searching_by_name and 'Stack with id {} does not exist'.format(
                    name) in str(error):
                stack_list = [
                    CloudFormationStack(system=self,
                                        uuid=stack_summary['StackId'])
                    for stack_summary in self.cloudformation_connection.
                    list_stacks()['StackSummaries']
                    if stack_summary['StackName'] == name
                ]
        return stack_list
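
    # Hedged usage sketch for find_stacks() (stack name and ID values are illustrative):
    #
    #   stacks = system.find_stacks(name='my-app-stack')  # retries via list_stacks()
    #                                                      # for deleted stacks
    #   stacks = system.find_stacks(id='arn-or-stack-id-from-cloudformation')
    #
    # Searching by id skips the list_stacks() fallback, so it is the cheaper option when
    # the stack ID is already known.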

    def get_stack(self, name):
        """
        Get single stack if it exists

        Args:
            name: unique name or id of the stack
        Returns:
            CloudFormationStack object
        """
        stacks = self.find_stacks(name)
        if not stacks:
            raise NotFoundError("Stack with name {} not found".format(name))
        elif len(stacks) > 1:
            raise MultipleItemsError(
                "Multiple stacks with name {} found".format(name))
        return stacks[0]

    def list_templates(self,
                       executable_by_me=True,
                       owned_by_me=True,
                       public=False):
        """
        List images on ec2 of image-type 'machine'

        Args:
            executable_by_me: search images executable by me (default True)
            owned_by_me: search images owned only by me (default True)
            public: search public images (default False)
        """
        img_filter = {'image-type': 'machine'}

        if not any([public, executable_by_me, owned_by_me]):
            raise ValueError(
                "One of the following must be 'True': owned_by_me, executable_by_me, public"
            )

        images = []
        if public:
            images.extend(self.api.get_all_images(filters=img_filter))
        if executable_by_me:
            images.extend(
                self.api.get_all_images(executable_by=['self'],
                                        filters=img_filter))
        if owned_by_me:
            images.extend(
                self.api.get_all_images(owners=['self'], filters=img_filter))

        return [EC2Image(system=self, raw=image) for image in set(images)]

    def find_templates(self,
                       name=None,
                       id=None,
                       executable_by_me=True,
                       owned_by_me=True,
                       public=False,
                       filters=None):
        """
        Find image on ec2 system

        Supported queries include searching by name, id, or passing
        in a specific filters dict to the system API. You can only
        select one of these methods.

        Args:
            name (str): name of image
            id (str): id of image
            filters (dict): filters to pass along to system.api.get_all_images()
            executable_by_me: search images executable by me (default True)
            owned_by_me: search images owned only by me (default True)
            public: search public images (default False)

        Returns:
            List of EC2Image objects that match
        """
        # Validate args
        filled_args = [arg for arg in (
            name,
            id,
            filters,
        ) if arg]
        if not filled_args or len(filled_args) > 1:
            raise ValueError(
                "You must select one of these search methods: name, id, or filters"
            )

        if id:
            kwargs = {'image_ids': [id]}
        elif filters:
            kwargs = {'filters': filters}
        elif name:
            # Quick validation that the image name isn't actually an ID
            if name.startswith('ami-'):
                # Switch to using the id search method
                kwargs = {'image_ids': [name]}
            else:
                kwargs = {'filters': {'name': name}}

        if not any([public, executable_by_me, owned_by_me]):
            raise ValueError(
                "One of the following must be 'True': owned_by_me, executable_by_me, public"
            )

        images = []
        if public:
            images.extend(self.api.get_all_images(**kwargs))
        if executable_by_me:
            images.extend(
                self.api.get_all_images(executable_by=['self'], **kwargs))
        if owned_by_me:
            images.extend(self.api.get_all_images(owners=['self'], **kwargs))

        return [EC2Image(system=self, raw=image) for image in set(images)]

    def get_template(self, name_or_id):
        matches = self.find_templates(name=name_or_id)
        if not matches:
            raise ImageNotFoundError(
                'Unable to find image {}'.format(name_or_id))
        elif len(matches) > 1:
            raise MultipleImagesError(
                'Image name {} returned more than one image. '
                'Use the ami-ID or remove duplicates from EC2'.format(
                    name_or_id))
        return matches[0]

    def create_template(self, *args, **kwargs):
        raise NotImplementedError

    # TODO: Move everything below here into the entity/class-based structure

    def create_s3_bucket(self, bucket_name):
        self.logger.info("Creating bucket: '%s'", bucket_name)
        try:
            self.s3_connection.create_bucket(Bucket=bucket_name,
                                             CreateBucketConfiguration={
                                                 'LocationConstraint':
                                                 self.kwargs.get('region')
                                             })
            self.logger.info("Success: Bucket was successfully created.")
            return True
        except Exception:
            self.logger.exception(
                "Error: Bucket was not successfully created.")
            return False

    def upload_file_to_s3_bucket(self, bucket_name, file_path, file_name):
        bucket = self.s3_connection.Bucket(bucket_name)
        self.logger.info("uploading file '%s' to bucket: '%s'", file_path,
                         bucket_name)
        if os.path.isfile(file_path):
            try:
                bucket.upload_file(file_path, file_name)
                self.logger.info("Success: uploading file completed")
                return True
            except Exception:
                self.logger.exception("File upload failed.")
                return False
        else:
            self.logger.error("Error: File to upload does not exist.")
            return False

    def object_exists_in_bucket(self, bucket_name, object_key):
        bucket = self.s3_connection.Bucket(name=bucket_name)
        objects = [o for o in bucket.objects.all() if o.key == object_key]
        return any(objects)

    def delete_s3_bucket(self, bucket_name):
        """TODO: Force delete - delete all objects and then bucket"""
        bucket = self.s3_connection.Bucket(bucket_name)
        self.logger.info("Trying to delete bucket '%s'", bucket_name)
        try:
            bucket.delete()
            self.logger.info("Success: bucket '%s' was deleted.", bucket_name)
            return True
        except Exception:
            self.logger.exception("Bucket '%s' deletion failed", bucket_name)
            return False

    def delete_objects_from_s3_bucket(self, bucket_name, object_keys):
        """Delete each of the given object_keys from the given bucket"""
        if not isinstance(object_keys, list):
            raise ValueError(
                "object_keys argument must be a list of key strings")
        bucket = self.s3_connection.Bucket(name=bucket_name)
        try:
            bucket.delete_objects(Delete={
                'Objects': [{
                    'Key': object_key
                } for object_key in object_keys]
            })
            return True
        except Exception:
            self.logger.exception(
                "Deleting object keys %s from Bucket '%s' failed", object_keys,
                bucket_name)
            return False
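
    # Hedged end-to-end sketch of the S3 helpers above (bucket, key and path are
    # placeholders; create_s3_bucket() uses the region passed in self.kwargs):
    #
    #   system.create_s3_bucket('my-test-bucket')
    #   system.upload_file_to_s3_bucket('my-test-bucket', '/tmp/data.csv', 'data.csv')
    #   assert system.object_exists_in_bucket('my-test-bucket', 'data.csv')
    #   system.delete_objects_from_s3_bucket('my-test-bucket', ['data.csv'])
    #   system.delete_s3_bucket('my-test-bucket')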

    def get_all_disassociated_addresses(self):
        return [
            addr for addr in self.api.get_all_addresses()
            if not addr.instance_id and not addr.network_interface_id
        ]

    def release_vpc_address(self, alloc_id):
        self.logger.info(" Releasing EC2 VPC EIP '%s'", str(alloc_id))
        try:
            self.api.release_address(allocation_id=alloc_id)
            return True

        except ActionTimedOutError:
            return False

    def release_address(self, address):
        self.logger.info(" Releasing EC2-CLASSIC EIP '%s'", address)
        try:
            self.api.release_address(public_ip=address)
            return True

        except ActionTimedOutError:
            return False

    def get_all_unattached_volumes(self):
        return [
            volume for volume in self.api.get_all_volumes()
            if not volume.attach_data.status
        ]

    def delete_sqs_queue(self, queue_name):
        self.logger.info(" Deleting SQS queue '%s'", queue_name)
        try:
            queue = self.sqs_connection.get_queue(queue_name=queue_name)
            if queue:
                self.sqs_connection.delete_queue(queue=queue)
                return True
            else:
                return False

        except ActionTimedOutError:
            return False

    def get_all_unused_loadbalancers(self):
        return [
            loadbalancer
            for loadbalancer in self.elb_connection.get_all_load_balancers()
            if not loadbalancer.instances
        ]

    def delete_loadbalancer(self, loadbalancer):
        self.logger.info(" Deleting Elastic Load Balancer '%s'",
                         loadbalancer.name)
        try:
            self.elb_connection.delete_load_balancer(loadbalancer.name)
            return True

        except ActionTimedOutError:
            return False

    def get_all_unused_network_interfaces(self):
        return [
            eni for eni in self.api.get_all_network_interfaces()
            if eni.status == "available"
        ]

    def import_image(self, s3bucket, s3key, format="vhd", description=None):
        self.logger.info(
            " Importing image %s from bucket %s (format %s, description %s)",
            s3key, s3bucket, format, description)
        try:
            result = self.ec2_connection.import_image(DiskContainers=[{
                'Description':
                description if description is not None else s3key,
                'Format':
                format,
                'UserBucket': {
                    'S3Bucket': s3bucket,
                    'S3Key': s3key
                }
            }])
            task_id = result.get("ImportTaskId")
            return task_id

        except Exception:
            self.logger.exception("Import of image '%s' failed.", s3key)
            return False

    def copy_image(self, source_region, source_image, image_id):
        self.logger.info(
            " Copying image %s from region %s to region %s with image id %s",
            source_image, source_region, self.kwargs.get('region'), image_id)
        try:
            copy_image = self.ec2_connection.copy_image(
                SourceRegion=source_region,
                SourceImageId=source_image,
                Name=image_id)
            # the boto3 client call returns a dict, not an object with attributes
            return copy_image.get('ImageId')

        except Exception:
            self.logger.exception("Copy of image '%s' failed.", source_image)
            return False

    def get_import_image_task(self, task_id):
        result = self.ec2_connection.describe_import_image_tasks(
            ImportTaskIds=[task_id])
        result_task = result.get("ImportImageTasks")
        return result_task[0]

    def get_image_id_if_import_completed(self, task_id):
        result = self.get_import_image_task(task_id)
        result_status = result.get("Status")
        if result_status == 'completed':
            return result.get("ImageId")
        else:
            return False
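
    # Hedged workflow sketch for the image-import helpers above (bucket and key are
    # placeholders): start the import, then poll until an AMI ID becomes available.
    #
    #   task_id = system.import_image('my-bucket', 'images/disk.vhd')
    #   image_id = system.get_image_id_if_import_completed(task_id)
    #   # returns False until the import task reports Status == 'completed'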

    def list_topics(self):
        return self.sns_connection.list_topics()

    def get_arn_if_topic_exists(self, topic_name):
        topics = self.list_topics()

        # There is no way to get topic_name, so it
        # has to be parsed from ARN, which looks
        # like this: arn:aws:sns:sa-east-1:ACCOUNT_NUM:AWSConfig_topic

        topic_found = [
            t.get('TopicArn') for t in topics.get('Topics')
            if t.get('TopicArn').split(':')[-1] == topic_name
        ]
        if topic_found:
            return topic_found[0]
        else:
            return False

    def delete_topic(self, arn):
        self.logger.info(" Deleting SNS Topic '%s'", arn)
        try:
            self.sns_connection.delete_topic(TopicArn=arn)
            return True

        except Exception:
            self.logger.exception("Delete of topic '%s' failed.", arn)
            return False

    def volume_exists_and_available(self, volume_name=None, volume_id=None):
        """
        Method for checking existence and availability state for volume

        Args:
            volume_name: Name of volume, if not set volume_id must be set
            volume_id: ID of volume in format vol-random_chars, if not set volume_name must be set

        Returns:
            True if volume exists and is available.
            False if volume doesn't exist or is not available.
        """
        if volume_id:
            try:
                response = self.ec2_connection.describe_volumes(
                    VolumeIds=[volume_id],
                    Filters=[{
                        'Name': 'status',
                        'Values': ['available']
                    }])
                if response.get('Volumes'):
                    return True
                else:
                    return False
            except Exception:
                return False
        elif volume_name:
            response = self.ec2_connection.describe_volumes(
                Filters=[{
                    'Name': 'status',
                    'Values': ['available']
                }, {
                    'Name': 'tag:Name',
                    'Values': [volume_name]
                }])
            if response.get('Volumes'):
                return True
            else:
                return False
        else:
            raise TypeError(
                "Neither volume_name nor volume_id were specified.")

    def snapshot_exists(self, snapshot_name=None, snapshot_id=None):
        """
        Method for checking existence of snapshot.

        Args:
            snapshot_name: Name of snapshot, if not set snapshot_id must be set.
            snapshot_id: Id of snapshot in format snap-random_chars, if not set snapshot_name
            must be set.

        Returns:
            True if snapshot exists.
            False if snapshot doesn't exist.
        """
        if snapshot_id:
            try:
                response = self.ec2_connection.describe_snapshots(
                    SnapshotIds=[snapshot_id])
                if response.get('Snapshots'):
                    return True
                else:
                    return False
            except Exception:
                return False
        elif snapshot_name:
            response = self.ec2_connection.describe_snapshots(
                Filters=[{
                    'Name': 'tag:Name',
                    'Values': [snapshot_name]
                }])
            if response.get('Snapshots'):
                return True
            else:
                return False
        else:
            raise TypeError(
                "Neither snapshot_name nor snapshot_id were specified.")

    def copy_snapshot(self, source_snapshot_id, source_region=None):
        """
        This method does not work properly because of a bug in boto3:
        it creates a new snapshot with an empty size and an error status.
        Args:
            source_snapshot_id: Id of source snapshot in format snap-random_chars
            source_region: Source region, if not set then ec2_connection region

        Returns:
            True when snapshot copy started successfully.
            False when snapshot copy didn't start.
        """
        if not source_region:
            source_region = self.kwargs.get('region')
        try:
            self.ec2_connection.copy_snapshot(
                SourceRegion=source_region,
                SourceSnapshotId=source_snapshot_id,
                DestinationRegion=source_region)
            return True
        except Exception:
            self.logger.exception("Copy snapshot with id '%s' failed.",
                                  source_snapshot_id)
            return False

    def list_load_balancer(self):
        self.logger.info("Attempting to List EC2 Load Balancers")
        return [
            loadbalancer.name
            for loadbalancer in self.elb_connection.get_all_load_balancers()
        ]

    def list_network(self):
        self.logger.info("Attempting to List EC2 Virtual Private Networks")
        networks = self.ec2_connection.describe_network_acls()['NetworkAcls']
        # The EC2 API does not return the tags of the networks here, so only the VPC IDs
        # are returned.
        return [acl['VpcId'] for acl in networks]

    def list_subnet(self):
        self.logger.info("Attempting to List EC2 Subnets")
        subnets = self.ec2_connection.describe_subnets()['Subnets']
        subnets_names = []

        # Subnet name tags are not mandatory. A subnet can carry multiple tags, but only
        # the 'Name' tag is used as the subnet name; if no 'Name' tag is set, CFME displays
        # the SubnetId instead.
        for subnet in subnets:
            if subnet.get('Tags'):
                for tag in subnet['Tags']:
                    if tag.get('Key') == 'Name':
                        subnets_names.append(tag['Value'])
            else:
                subnets_names.append(subnet['SubnetId'])
        return subnets_names

    def list_security_group(self):
        self.logger.info("Attempting to List EC2 security groups")
        return [sec_gp.name for sec_gp in self.api.get_all_security_groups()]

    def list_router(self):
        route_tables = self.ec2_connection.describe_route_tables(
        )['RouteTables']
        routers_names = []

        # Router names are optional 'Name' tags; if no 'Name' tag is present, the
        # RouteTableId is displayed as the name in CFME.
        for route in route_tables:
            if route.get('Tags'):
                for tag in route['Tags']:
                    if tag.get('Key') == 'Name':
                        routers_names.append(tag['Value'])
            else:
                routers_names.append(route['RouteTableId'])

        return routers_names
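
# A standalone, hedged sketch of the "use the 'Name' tag or fall back to the resource ID"
# pattern used by list_subnet() and list_router() above. It assumes boto3 is installed
# and that credentials plus a default region are already configured.
import boto3


def subnet_display_names():
    """Return each subnet's 'Name' tag, falling back to its SubnetId."""
    ec2 = boto3.client('ec2')
    names = []
    for subnet in ec2.describe_subnets()['Subnets']:
        # Tags come back as a list of {'Key': ..., 'Value': ...} dicts
        tags = {tag['Key']: tag['Value'] for tag in subnet.get('Tags', [])}
        names.append(tags.get('Name', subnet['SubnetId']))
    return names
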
Example #6
class BotoBalanceInterface(BalanceInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        #boto.set_stream_logger('foo')
        path = '/services/LoadBalancing'
        port = 8773
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        if clc_host[len(clc_host) - 13:] == 'amazonaws.com':
            clc_host = clc_host.replace('ec2', 'elasticloadbalancing', 1)
            path = '/'
            # use the rewritten AWS endpoint rather than the eucalyptus region
            reg = RegionInfo(endpoint=clc_host)
            port = 443
        self.conn = ELBConnection(access_id,
                                  secret_key,
                                  region=reg,
                                  port=port,
                                  path=path,
                                  is_secure=True,
                                  security_token=token,
                                  debug=0)
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        with open(name, 'w') as f:
            json.dump(obj, f, cls=BotoJsonBalanceEncoder, indent=2)

    def create_load_balancer(self,
                             name,
                             zones,
                             listeners,
                             subnets=None,
                             security_groups=None,
                             scheme='internet-facing'):
        return self.conn.create_load_balancer(name, zones, listeners, subnets,
                                              security_groups, scheme)

    def delete_load_balancer(self, name):
        return self.conn.delete_load_balancer(name)

    def get_all_load_balancers(self, load_balancer_names=None):
        obj = self.conn.get_all_load_balancers(load_balancer_names)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/ELB_Balancers.json")
        return obj

    def deregister_instances(self, load_balancer_name, instances):
        return self.conn.deregister_instances(load_balancer_name, instances)

    def register_instances(self, load_balancer_name, instances):
        return self.conn.register_instances(load_balancer_name, instances)

    def create_load_balancer_listeners(self, name, listeners):
        return self.conn.create_load_balancer_listeners(name, listeners)

    def delete_load_balancer_listeners(self, name, ports):
        return self.conn.delete_load_balancer_listeners(name, ports)

    def configure_health_check(self, name, health_check):
        return self.conn.configure_health_check(name, health_check)

    def describe_instance_health(self, load_balancer_name, instances=None):
        obj = self.conn.describe_instance_health(load_balancer_name, instances)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/ELB_Instances.json")
        return obj
class BotoBalanceInterface(BalanceInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        #boto.set_stream_logger('foo')
        path = '/services/LoadBalancing'
        port = 8773
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        if clc_host[len(clc_host) - 13:] == 'amazonaws.com':
            clc_host = clc_host.replace('ec2', 'elasticloadbalancing', 1)
            path = '/'
            # use the rewritten AWS endpoint rather than the eucalyptus region
            reg = RegionInfo(endpoint=clc_host)
            port = 443
        self.conn = ELBConnection(access_id,
                                  secret_key,
                                  region=reg,
                                  port=port,
                                  path=path,
                                  is_secure=True,
                                  security_token=token,
                                  debug=0)
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        with open(name, 'w') as f:
            json.dump(obj, f, cls=BotoJsonBalanceEncoder, indent=2)

    def create_load_balancer(self,
                             name,
                             zones,
                             listeners,
                             subnets=None,
                             security_groups=None,
                             scheme='internet-facing'):
        return self.conn.create_load_balancer(name, zones, listeners, subnets,
                                              security_groups, scheme)

    def delete_load_balancer(self, name):
        return self.conn.delete_load_balancer(name)

    def get_all_load_balancers(self, load_balancer_names=None):
        params = {}
        if load_balancer_names:
            # build_list_params lives on the boto connection, not on this wrapper
            self.conn.build_list_params(params, load_balancer_names,
                                        'LoadBalancerNames.member.%d')
        http_request = self.conn.build_base_http_request(
            'GET', '/', None, params, {}, '', self.conn.server_name())
        http_request.params['Action'] = 'DescribeLoadBalancers'
        http_request.params['Version'] = self.conn.APIVersion
        response = self.conn._mexe(http_request, override_num_retries=2)
        body = response.read()
        boto.log.debug(body)
        if not body:
            boto.log.error('Null body %s' % body)
            raise self.conn.ResponseError(response.status, response.reason,
                                          body)
        elif response.status == 200:
            obj = boto.resultset.ResultSet([
                ('member', boto.ec2.elb.loadbalancer.LoadBalancer)
            ])
            h = boto.handler.XmlHandler(obj, self.conn)
            import xml.sax
            xml.sax.parseString(body, h)
            if self.saveclcdata:
                self.__save_json__(obj, "mockdata/ELB_Balancers.json")
            return obj
        else:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.conn.ResponseError(response.status, response.reason,
                                          body)

    def deregister_instances(self, load_balancer_name, instances):
        return self.conn.deregister_instances(load_balancer_name, instances)

    def register_instances(self, load_balancer_name, instances):
        return self.conn.register_instances(load_balancer_name, instances)

    def create_load_balancer_listeners(self, name, listeners):
        return self.conn.create_load_balancer_listeners(name, listeners)

    def delete_load_balancer_listeners(self, name, ports):
        return self.conn.delete_load_balancer_listeners(name, ports)

    def configure_health_check(self, name, health_check):
        return self.conn.configure_health_check(name, health_check)

    def describe_instance_health(self, load_balancer_name, instances=None):
        obj = self.conn.describe_instance_health(load_balancer_name, instances)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/ELB_Instances.json")
        return obj
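
# Hedged usage sketch for the BotoBalanceInterface above. The host, credentials and the
# 'mockdata' directory are placeholders, and BalanceInterface/BotoJsonBalanceEncoder are
# assumed to come from the surrounding project, so this is illustrative only:
#
#   elb = BotoBalanceInterface('clc.example.internal', 'ACCESS_ID', 'SECRET_KEY', None)
#   elb.saveclcdata = True                    # also dump responses to mockdata/*.json
#   for lb in elb.get_all_load_balancers():   # hand-rolled DescribeLoadBalancers call
#       print(lb.name, elb.describe_instance_health(lb.name))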
Example #8
class EC2System(MgmtSystemAPIBase):
    """EC2 Management System, powered by boto

    Wraps the EC2 API and mimics the behavior of other implementors of
    MgmtServiceAPIBase for use in VM control testing

    Instead of username and password, accepts access_key_id and
    secret_access_key, the AWS analogs to those ideas. These are passed, along
    with any kwargs, straight through to boto's EC2 connection factory. This
    allows customization of the EC2 connection, to connect to another region,
    for example.

    For the purposes of the EC2 system, a VM's instance ID is its name because
    EC2 instances don't have to have unique names.

    Args:
        **kwargs: Arguments to connect, usually username, password, region.
    Returns: A :py:class:`EC2System` object.
    """

    _stats_available = {
        'num_vm': lambda self: len(self.list_vm()),
        'num_template': lambda self: len(self.list_template()),
    }

    states = {
        'running': ('running',),
        'stopped': ('stopped', 'terminated'),
        'suspended': (),
        'deleted': ('terminated',),
    }

    can_suspend = False

    def __init__(self, **kwargs):
        super(EC2System, self).__init__(kwargs)
        username = kwargs.get('username')
        password = kwargs.get('password')

        regionname = kwargs.get('region')
        region = get_region(kwargs.get('region'))
        self.api = EC2Connection(username, password, region=region)
        self.sqs_connection = connection.SQSConnection(username, password, region=_regions(
            regionmodule=sqs, regionname=regionname))
        self.elb_connection = ELBConnection(username, password, region=_regions(
            regionmodule=elb, regionname=regionname))
        self.s3_connection = boto.connect_s3(username, password)
        self.stackapi = CloudFormationConnection(username, password, region=_regions(
            regionmodule=cloudformation, regionname=regionname))
        self.kwargs = kwargs

    def disconnect(self):
        """Disconnect from the EC2 API -- NOOP

        AWS EC2 service is stateless, so there's nothing to disconnect from
        """
        pass

    def info(self):
        """Returns the current versions of boto and the EC2 API being used"""
        return '%s %s' % (boto.UserAgent, self.api.APIVersion)

    def list_vm(self, include_terminated=True):
        """Returns a list from instance IDs currently active on EC2 (not terminated)"""
        instances = None
        if include_terminated:
            instances = [inst for inst in self._get_all_instances()]
        else:
            instances = [inst for inst in self._get_all_instances() if inst.state != 'terminated']
        return [i.tags.get('Name', i.id) for i in instances]

    def list_template(self):
        private_images = self.api.get_all_images(owners=['self'],
                                                 filters={'image-type': 'machine'})
        shared_images = self.api.get_all_images(executable_by=['self'],
                                                filters={'image-type': 'machine'})
        combined_images = list(set(private_images) | set(shared_images))
        # Try to pull the image name (might not exist), falling back on ID (must exist)
        return map(lambda i: i.name or i.id, combined_images)

    def list_flavor(self):
        raise NotImplementedError('This function is not supported on this platform.')

    def vm_status(self, instance_id):
        """Returns the status of the requested instance

        Args:
            instance_id: ID of the instance to inspect
        Returns: Instance status.

        See this `page <http://docs.aws.amazon.com/AWSEC2/latest/APIReference/
        ApiReference-ItemType-InstanceStateType.html>`_ for possible return values.

        """
        instance = self._get_instance(instance_id)
        return instance.state

    def vm_type(self, instance_id):
        """Returns the instance type of the requested instance
            e.g. m1.medium, m3.medium etc..

                Args:
                    instance_id: ID of the instance to inspect
                Returns: Instance type.
        """
        instance = self._get_instance(instance_id)
        return instance.instance_type

    def vm_creation_time(self, instance_id):
        instance = self._get_instance(instance_id)
        # Example instance.launch_time: 2014-08-13T22:09:40.000Z
        launch_time = datetime.strptime(instance.launch_time[:19], '%Y-%m-%dT%H:%M:%S')
        # launch time is UTC, localize it, make it tz-naive to work with timedelta
        return tzlocal.get_localzone().fromutc(launch_time).replace(tzinfo=None)

    def create_vm(self):
        raise NotImplementedError('create_vm not implemented.')

    def delete_vm(self, instance_id):
        """Deletes the an instance

        Args:
            instance_id: ID of the instance to act on
        Returns: Whether or not the backend reports the action completed
        """
        self.logger.info(" Terminating EC2 instance %s" % instance_id)
        instance_id = self._get_instance_id_by_name(instance_id)
        try:
            self.api.terminate_instances([instance_id])
            self._block_until(instance_id, self.states['deleted'])
            return True
        except ActionTimedOutError:
            return False

    def describe_stack(self, stack_name):
        """Describe stackapi

        Returns the description for the specified stack
        Args:
            stack_name: Unique name of stack
        """
        result = []
        stacks = self.stackapi.describe_stacks(stack_name)
        result.extend(stacks)
        return result

    def stack_exist(self, stack_name):
        stacks = [stack for stack in self.describe_stack(stack_name)
                  if stack.stack_name == stack_name]
        return bool(stacks)

    def delete_stack(self, stack_name):
        """Deletes stack

        Args:
            stack_name: Unique name of stack
        """
        self.logger.info(" Terminating EC2 stack {}" .format(stack_name))
        try:
            self.stackapi.delete_stack(stack_name)
            return True
        except ActionTimedOutError:
            return False

    def start_vm(self, instance_id):
        """Start an instance

        Args:
            instance_id: ID of the instance to act on
        Returns: Whether or not the backend reports the action completed
        """
        self.logger.info(" Starting EC2 instance %s" % instance_id)
        instance_id = self._get_instance_id_by_name(instance_id)
        try:
            self.api.start_instances([instance_id])
            self._block_until(instance_id, self.states['running'])
            return True
        except ActionTimedOutError:
            return False

    def stop_vm(self, instance_id):
        """Stop an instance

        Args:
            instance_id: ID of the instance to act on
        Returns: Whether or not the backend reports the action completed
        """
        self.logger.info(" Stopping EC2 instance %s" % instance_id)
        instance_id = self._get_instance_id_by_name(instance_id)
        try:
            self.api.stop_instances([instance_id])
            self._block_until(instance_id, self.states['stopped'], timeout=360)
            return True
        except ActionTimedOutError:
            return False

    def restart_vm(self, instance_id):
        """Restart an instance

        Args:
            instance_id: ID of the instance to act on
        Returns: Whether or not the backend reports the action completed

        The action is taken in two separate calls to EC2. A 'False' return can
        indicate a failure of either the stop action or the start action.

        Note: There is a reboot_instances call available on the API, but it provides
            less insight than blocking on stop_vm and start_vm. Furthermore,
            there is no "rebooting" state, so there are potential monitoring
            issues that are avoided by completing these steps atomically
        """
        self.logger.info(" Restarting EC2 instance %s" % instance_id)
        return self.stop_vm(instance_id) and self.start_vm(instance_id)

    def is_vm_state(self, instance_id, state):
        return self.vm_status(instance_id) in state

    def is_vm_running(self, instance_id):
        """Is the VM running?

        Args:
            instance_id: ID of the instance to inspect
        Returns: Whether or not the requested instance is running
        """
        return self.vm_status(instance_id) in self.states['running']

    def wait_vm_running(self, instance_id, num_sec=360):
        self.logger.info(" Waiting for EC2 instance %s to change status to running" % instance_id)
        wait_for(self.is_vm_running, [instance_id], num_sec=num_sec)

    def is_vm_stopped(self, instance_id):
        """Is the VM stopped?

        Args:
            instance_id: ID of the instance to inspect
        Returns: Whether or not the requested instance is stopped
        """
        return self.vm_status(instance_id) in self.states['stopped']

    def wait_vm_stopped(self, instance_id, num_sec=360):
        self.logger.info(
            " Waiting for EC2 instance %s to change status to stopped or terminated" % instance_id
        )
        wait_for(self.is_vm_stopped, [instance_id], num_sec=num_sec)

    def suspend_vm(self, instance_id):
        """Suspend a VM: Unsupported by EC2

        Args:
            instance_id: ID of the instance to act on
        Raises:
            ActionNotSupported: The action is not supported on the system
        """
        raise ActionNotSupported()

    def is_vm_suspended(self, instance_id):
        """Is the VM suspended? We'll never know because EC2 don't support this.

        Args:
            instance_id: ID of the instance to inspect
        Raises:
            ActionNotSupported: The action is not supported on the system
        """
        raise ActionNotSupported()

    def wait_vm_suspended(self, instance_id, num_sec):
        """We would wait forever - EC2 doesn't support this.

        Args:
            instance_id: ID of the instance to wait for
        Raises:
            ActionNotSupported: The action is not supported on the system
        """
        raise ActionNotSupported()

    def clone_vm(self, source_name, vm_name):
        raise NotImplementedError('This function has not yet been implemented.')

    def deploy_template(self, template, *args, **kwargs):
        """Instantiate the requested template image (ami id)

        Accepts args/kwargs from boto's
        :py:meth:`run_instances<boto:boto.ec2.connection.EC2Connection.run_instances>` method

        Most important args are listed below.

        Args:
            template: Template name (AMI ID) to instantiate
            vm_name: Name of the instance (Name tag to set)
            instance_type: Type (flavor) of the instance

        Returns: Instance ID of the created instance

        Note: min_count and max_count args will be forced to '1'; if you're trying to do
              anything fancier than that, you might be in the wrong place

        """
        # Enforce create_vm only creating one VM
        self.logger.info(" Deploying EC2 template %s" % template)

        # strip out kwargs that ec2 doesn't understand
        timeout = kwargs.pop('timeout', 900)
        vm_name = kwargs.pop('vm_name', None)
        power_on = kwargs.pop('power_on', True)

        # Make sure we only provision one VM
        kwargs.update({'min_count': 1, 'max_count': 1})

        # sanity-check inputs
        if 'instance_type' not in kwargs:
            kwargs['instance_type'] = 'm1.small'
        if not template.startswith('ami'):
            # assume this is a lookup by name, get the ami id
            template = self._get_ami_id_by_name(template)

        # clone!
        reservation = self.api.run_instances(template, *args, **kwargs)
        instances = self._get_instances_from_reservations([reservation])
        # Should have only made one VM; return its ID for use in other methods
        self.wait_vm_running(instances[0].id, num_sec=timeout)

        if vm_name:
            self.set_name(instances[0].id, vm_name)
        if power_on:
            self.start_vm(instances[0].id)
        return instances[0].id

    def set_name(self, instance_id, new_name):
        self.logger.info("Setting name of EC2 instance %s to %s" % (instance_id, new_name))
        instance = self._get_instance(instance_id)
        instance.add_tag('Name', new_name)
        return new_name

    def get_name(self, instance_id):
        return self._get_instance(instance_id).tags.get('Name', instance_id)

    def _get_instance(self, instance_id):
        instance_id = self._get_instance_id_by_name(instance_id)
        reservations = self.api.get_all_instances([instance_id])
        instances = self._get_instances_from_reservations(reservations)
        if len(instances) > 1:
            raise MultipleInstancesError

        try:
            return instances[0]
        except IndexError:
            return None

    def current_ip_address(self, instance_id):
        return str(self._get_instance(instance_id).ip_address)

    def get_ip_address(self, instance_id, **kwargs):
        return self.current_ip_address(instance_id)

    def _get_instance_id_by_name(self, instance_name):
        # Quick validation that the instance name isn't actually an instance ID;
        # if it already matches the ID pattern, return it as-is.
        pattern = re.compile(r'^i-\w{8,17}$')
        if pattern.match(instance_name):
            return instance_name

        # Filter by the 'Name' tag
        filters = {
            'tag:Name': instance_name,
        }
        reservations = self.api.get_all_instances(filters=filters)
        instances = self._get_instances_from_reservations(reservations)
        if not instances:
            raise VMInstanceNotFound(instance_name)
        elif len(instances) > 1:
            raise MultipleInstancesError('Instance name "%s" is not unique' % instance_name)

        # We have an instance! return its ID
        return instances[0].id

    def _get_ami_id_by_name(self, image_name):
        matches = self.api.get_all_images(filters={'name': image_name})
        if not matches:
            raise ImageNotFoundError(image_name)
        elif len(matches) > 1:
            raise MultipleImagesError('Template name %s returned more than one image. '
                'Use the ami-ID or remove duplicates from EC2' % image_name)

        return matches[0].id

    def does_vm_exist(self, name):
        try:
            self._get_instance_id_by_name(name)
            return True
        except MultipleInstancesError:
            return True
        except VMInstanceNotFound:
            return False

    def _get_instances_from_reservations(self, reservations):
        """Takes a sequence of reservations and returns their instances"""
        instances = list()
        for reservation in reservations:
            for instance in reservation.instances:
                instances.append(instance)
        return instances

    def _get_all_instances(self):
        """Gets all instances that EC2 can see"""
        reservations = self.api.get_all_instances()
        instances = self._get_instances_from_reservations(reservations)
        return instances

    def _block_until(self, instance_id, expected, timeout=90):
        """Blocks until the given instance is in one of the expected states

        Takes an optional timeout value.
        """
        wait_for(lambda: self.vm_status(instance_id) in expected, num_sec=timeout)

    def remove_host_from_cluster(self, hostname):
        raise NotImplementedError('remove_host_from_cluster not implemented')

    def create_s3_bucket(self, bucket_name):
        self.logger.info("Creating bucket: {}".format(bucket_name))
        self.s3_connection.create_bucket(bucket_name)

    def upload_file_to_s3_bucket(self, bucket_name, file_path, key_name):
        bucket = self.s3_connection.get_bucket(bucket_name)
        self.logger.info("uploading file {} to bucket: {}".format(file_path, bucket_name))
        key = boto.s3.key.Key(bucket, key_name)
        with open(file_path) as f:
            key.set_contents_from_file(f)
        self.logger.info("Success: uploading file completed")
        return True

    def get_all_disassociated_addresses(self):
        return [
            addr for addr
            in self.api.get_all_addresses()
            if not addr.instance_id and not addr.network_interface_id]

    def release_vpc_address(self, alloc_id):
        self.logger.info(" Releasing EC2 VPC EIP {}".format(str(alloc_id)))
        try:
            self.api.release_address(allocation_id=alloc_id)
            return True

        except ActionTimedOutError:
            return False

    def release_address(self, address):
        self.logger.info(" Releasing EC2-CLASSIC EIP {}".format(address))
        try:
            self.api.release_address(public_ip=address)
            return True

        except ActionTimedOutError:
            return False

    def get_all_unattached_volumes(self):
        return [volume for volume in self.api.get_all_volumes() if not
                volume.attach_data.status]

    def delete_sqs_queue(self, queue_name):
        self.logger.info(" Deleting SQS queue {}".format(queue_name))
        try:
            queue = self.sqs_connection.get_queue(queue_name=queue_name)
            if queue:
                self.sqs_connection.delete_queue(queue=queue)
                return True
            else:
                return False

        except ActionTimedOutError:
            return False

    def get_all_unused_loadbalancers(self):
        return [
            loadbalancer for loadbalancer
            in self.elb_connection.get_all_load_balancers()
            if not loadbalancer.instances]

    def delete_loadbalancer(self, loadbalancer):
        self.logger.info(" Deleting Elastic Load Balancer {}".format(loadbalancer.name))
        try:
            self.elb_connection.delete_load_balancer(loadbalancer.name)
            return True

        except ActionTimedOutError:
            return False

    def get_all_unused_network_interfaces(self):
        return [eni for eni in self.api.get_all_network_interfaces() if eni.status == "available"]
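
# Hedged usage sketch for the boto2-based EC2System above (credentials, region and
# template name are placeholders):
#
#   system = EC2System(username='ACCESS_KEY_ID', password='SECRET_ACCESS_KEY',
#                      region='us-east-1')
#   instance_id = system.deploy_template('my-template-name', vm_name='demo-vm',
#                                        instance_type='m1.small')
#   system.stop_vm(instance_id)
#   system.delete_vm(instance_id)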
Example #9
# ---------------------------Clean UP-------------------------------------------
res = conn.get_all_instances()
ids = []
for r in res:
    ids.append(r.instances[0].id)

for s in ids:
    if s == lg_id:
        continue
    try:
        conn.terminate_instances(instance_ids=[s])
    except Exception:
        continue

time.sleep(100)

con_elb.delete_load_balancer('ELB')
time.sleep(100)

con_as.delete_auto_scaling_group('Project2.2_AutoSacling_Group', force_delete=True)
con_as.delete_launch_configuration('Project2.2_Lauch_Config')

while True:
    try:
        conn.delete_security_group(name='LBAS')
        conn.delete_security_group(name='Load_Generator')
        break
    except Exception:
        time.sleep(5)

Example #10
conn = EC2Connection()
con_l = ELBConnection()
con_a = AutoScaleConnection()

res = conn.get_all_instances()
ids = []
for r in res:
    ids.append(r.instances[0].id)

for s in ids:
    try:
        conn.terminate_instances(instance_ids=[s])
    except Exception:
        continue

try:
    con_l.delete_load_balancer('ELB')
    time.sleep(60)
except Exception:
    pass
con_a.delete_auto_scaling_group('Project2.2_AutoSacling_Group', force_delete=True)
con_a.delete_launch_configuration('Project2.2_Lauch_Config')

while True:
    try:
        conn.delete_security_group(name='LBAS')
        conn.delete_security_group(name='Load_Generator')
        break
    except Exception:
        time.sleep(5)
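
# A hedged alternative to the fixed time.sleep() calls in the cleanup scripts above:
# poll until every tracked instance reports 'terminated' before deleting the security
# groups. The timeout, interval and the reuse of 'conn'/'ids' are illustrative.
import time


def wait_until(predicate, timeout=300, interval=5):
    """Poll predicate() until it returns True or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False


def all_terminated(conn, ids):
    """True when every instance in ids reports the 'terminated' state (boto2)."""
    reservations = conn.get_all_instances(instance_ids=ids)
    return all(inst.state == 'terminated'
               for res in reservations for inst in res.instances)

# e.g. wait_until(lambda: all_terminated(conn, ids)) before deleting the security groups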