def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=False, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, region=None, path='/',
                 security_token=None, validate_certs=True):
        """
        Create a new connection to the EC2 Elastic Load Balancing service.

        In addition to initializing the underlying ELBConnection, this opens
        a companion CloudWatchConnection (``self.cw_con``) with the same
        credentials and transport settings, presumably so metrics for the
        load balancers can be fetched through the same object -- confirm
        against callers.

        note:: The region argument is overridden by the region specified in
            the boto configuration file.
        """
        # Fall back to this class's default region when none was supplied.
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint)
        self.region = region
        # Companion CloudWatch connection built with identical settings.
        self.cw_con = CloudWatchConnection(aws_access_key_id,
                                    aws_secret_access_key,
                                    is_secure, port, proxy, proxy_port,
                                    proxy_user, proxy_pass, debug,
                                    https_connection_factory, region, path,
                                    security_token,
                                    validate_certs=validate_certs)
        ELBConnection.__init__(self, aws_access_key_id,
                                    aws_secret_access_key,
                                    is_secure, port, proxy, proxy_port,
                                    proxy_user, proxy_pass, debug,
                                    https_connection_factory, region, path,
                                    security_token,
                                    validate_certs=validate_certs)
# Example #2
    def handle(self, *args, **options):
        """Print, per region, this host's load balancer and the running
        peer instances that share its primary security group.

        Python 2 style (bare ``print`` statements); AWS credentials come
        from Django ``settings``.
        """
        ec2_connection = EC2Connection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
        regions = boto.ec2.elb.regions()
        my_priv_ip = get_my_private_ip()

        for region in regions:
            region_lb_connection = ELBConnection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY, region=region)
            # regions is a list of RegionInfo with connection_cls ELBConnection
            # so for our EC2Connection we want to get an EC2Connection RegionInfo with the corresponding region descriptor
            region_connection = EC2Connection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY, region=boto.ec2.get_region(region.name))

            load_balancers = region_lb_connection.get_all_load_balancers()
            # NOTE(review): only the first instance of each reservation is
            # kept -- assumes single-instance reservations; confirm.
            instances = [r.instances[0] for r in region_connection.get_all_instances()]
            try:
                # FIXME: TEST This in dev, stage, prod environments
                me = [i for i in instances if i.private_ip_address == my_priv_ip][0]
                my_main_group = me.groups[0].name
            except IndexError:
                # This host is not among the instances; fall back to 'dev'.
                me = None
                my_main_group = 'dev'
            # Keep only running peers in our group, and our group's balancer.
            instances = [i for i in instances if i.state != u'stopped' and i.groups[0].name == my_main_group]
            load_balancers = [lb for lb in load_balancers if lb.name == my_main_group]

            if load_balancers:
                print region, load_balancers[0]
                for instance in instances:
                    print instance.tags['Name'], instance.public_dns_name, instance.tags['Name'] + '.c2gops.com' # FIXME assumes basename
# Example #3
 def test_next_token(self):
     """disable_availability_zones should echo back the removed zones."""
     response = mock.Mock()
     response.status = 200
     response.read.return_value = DISABLE_RESPONSE
     connection = ELBConnection(aws_access_key_id="aws_access_key_id", aws_secret_access_key="aws_secret_access_key")
     connection.make_request = mock.Mock(return_value=response)
     result = connection.disable_availability_zones("mine", ["sample-zone"])
     self.assertEqual(result, ["sample-zone"])
# Example #4
    def test_detach_subnets(self):
        """detach_subnets should issue its request without raising."""
        connection = ELBConnection(aws_access_key_id="aws_access_key_id", aws_secret_access_key="aws_secret_access_key")
        balancer = LoadBalancer(connection, "mylb")

        response = mock.Mock()
        response.status = 200
        response.read.return_value = DETACH_RESPONSE
        connection.make_request = mock.Mock(return_value=response)
        balancer.detach_subnets("s-xxx")
# Example #5
 def test_request_with_marker(self):
     """Passing the first page's marker should fetch the next page."""
     response = mock.Mock()
     response.status = 200
     response.read.return_value = DESCRIBE_RESPONSE
     connection = ELBConnection(aws_access_key_id="aws_access_key_id", aws_secret_access_key="aws_secret_access_key")
     connection.make_request = mock.Mock(return_value=response)
     first_page = connection.get_all_load_balancers()
     self.assertEqual("1234", first_page.marker)
     second_page = connection.get_all_load_balancers(marker=first_page.marker)
     self.assertEqual(len(second_page), 1)
# Example #6
 def _setup_mock(self):
     """Build a mocked ELB request fixture.

     Returns a (response, connection, load_balancer) triple whose
     connection has ``make_request`` stubbed out.
     """
     response = mock.Mock()
     response.status = 200
     connection = ELBConnection(aws_access_key_id='aws_access_key_id',
                                aws_secret_access_key='aws_secret_access_key')
     connection.make_request = mock.Mock(return_value=response)
     return response, connection, LoadBalancer(connection, 'test_elb')
class BotoBalanceInterface(BalanceInterface):
    """Balance-interface implementation backed by a boto ELBConnection,
    pointed at either a Eucalyptus cloud controller or real AWS."""
    # Shared ELB connection, created in __init__.
    conn = None
    # When True, responses are also dumped to mockdata/*.json files.
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        #boto.set_stream_logger('foo')
        path='/services/elb'
        port=8773
        # Talking to real AWS rather than a Eucalyptus CLC: swap the ec2
        # hostname for the ELB one and use the standard HTTPS port/path.
        if clc_host[len(clc_host)-13:] == 'amazonaws.com':
            clc_host = clc_host.replace('ec2', 'elasticloadbalancing', 1)
            path = '/'
            reg = None
            port=443
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        self.conn = ELBConnection(access_id, secret_key, region=reg,
                                  port=port, path=path,
                                  is_secure=True, security_token=token, debug=0)
        # NOTE(review): certificate validation is disabled -- acceptable for
        # a private-cloud endpoint with self-signed certs, unsafe for AWS.
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        # Dump obj to a JSON file using the project's balance encoder.
        f = open(name, 'w')
        json.dump(obj, f, cls=BotoJsonBalanceEncoder, indent=2)
        f.close()

    def create_load_balancer(self, name, zones, listeners, subnets=None,
                             security_groups=None, scheme='internet-facing'):
        # Thin pass-through to boto.
        return self.conn.create_load_balancer(name, zones, listeners, subnets, security_groups, scheme)
    
    def delete_load_balancer(self, name):
        return self.conn.delete_load_balancer(name)

    def get_all_load_balancers(self, load_balancer_names=None):
        # NOTE(review): the early return below short-circuits this method,
        # making the real describe call unreachable -- looks like a leftover
        # stub/mock toggle; confirm intent before removing it.
        return []
        obj = self.conn.get_all_load_balancers(load_balancer_names)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/ELB_Balancers.json")
        return obj

    def deregister_instances(self, load_balancer_name, instances):
        return self.conn.deregister_instances(load_balancer_name, instances)

    def register_instances(self, load_balancer_name, instances):
        return self.conn.register_instances(load_balancer_name, instances)

    def create_load_balancer_listeners(self, name, listeners):
        return self.conn.create_load_balancer_listeners(name, listeners)

    def delete_load_balancer_listeners(self, name, ports):
        return self.conn.delete_load_balancer_listeners(name, ports)

    def configure_health_check(self, name, health_check):
        return self.conn.configure_health_check(name, health_check)

    def describe_instance_health(self, load_balancer_name, instances=None):
        obj = self.conn.describe_instance_health(load_balancer_name, instances)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/ELB_Instances.json")
        return obj
# Example #8
    def get_all_load_balancers():
        """Return every load balancer in the account, following pagination.

        Bug fix: the original loop re-issued the unpaginated call inside
        ``while load_balancers.is_truncated``, which both duplicated the
        first page and never terminated once a response was truncated.
        Each page's ``marker`` is now passed to fetch the following page
        (same pattern as the marker test elsewhere in this file).
        """
        connection = ELBConnection()
        all_load_balancers = []
        load_balancers = connection.get_all_load_balancers()
        all_load_balancers.extend(load_balancers)

        while load_balancers.is_truncated:
            # Ask for the page that follows the one just consumed.
            load_balancers = connection.get_all_load_balancers(marker=load_balancers.marker)
            all_load_balancers.extend(load_balancers)

        return all_load_balancers
# Example #9
 def test_build_list_params(self):
     """build_list_params should expand a list into numbered parameters."""
     connection = ELBConnection()
     params = {}
     connection.build_list_params(
         params, ['thing1', 'thing2', 'thing3'], 'ThingName%d')
     self.assertEqual(params, {
         'ThingName1': 'thing1',
         'ThingName2': 'thing2',
         'ThingName3': 'thing3',
         })
# Example #10
    def test_create_load_balancer(self):
        """A freshly created balancer should round-trip its settings."""
        connection = ELBConnection()
        name = 'elb-boto-unit-test'
        zones = ['us-east-1a']
        listeners = [(80, 8000, 'HTTP')]
        balancer = connection.create_load_balancer(name, zones, listeners)
        self.assertEqual(balancer.name, name)
        self.assertEqual(balancer.availability_zones, zones)
        self.assertEqual(balancer.listeners, listeners)

        fetched = connection.get_all_load_balancers()
        self.assertEqual([lb.name for lb in fetched], [name])
# Example #11
def _remove_instance_from_load_balancer(conn, instance, instance_info):
    """Deregister *instance* from its configured load balancer, if any."""
    # Guard clause: nothing to do when no balancer is configured.
    if 'load_balancer' not in instance_info:
        print("Skipping disassociation with Load Balancer ")
        return

    load_balancer_name = instance_info['load_balancer']
    elb_conn = ELBConnection()

    if _instance_in_load_balancer(elb_conn, load_balancer_name, instance.id):
        print("Removing Instance(s) " + str([instance.id]) + " from load balancer " + load_balancer_name)
        elb_conn.deregister_instances(load_balancer_name, [instance.id])
    else:
        print("Instance(s) " + str([instance.id]) + " not associated with load balancer " + load_balancer_name + ", Nothing left to do")
# Example #12
    def test_create_load_balancer_listeners(self):
        """Listeners added after creation should appear with the originals."""
        connection = ELBConnection()
        name = 'elb-boto-unit-test'
        zones = ['us-east-1a']
        initial_listeners = [(80, 8000, 'HTTP')]
        connection.create_load_balancer(name, zones, initial_listeners)

        extra_listeners = [(443, 8001, 'HTTP')]
        connection.create_load_balancer_listeners(name, extra_listeners)
        fetched = connection.get_all_load_balancers()
        self.assertEqual([lb.name for lb in fetched], [name])
        self.assertEqual(
            sorted(listener.get_tuple() for listener in fetched[0].listeners),
            sorted(initial_listeners + extra_listeners)
            )
# Example #13
def _add_instance_to_load_balancer(conn, instance, instance_info):
    """Register *instance* with its configured load balancer, removing any
    stale registration first."""
    # Guard clause: nothing to do when no balancer is configured.
    if 'load_balancer' not in instance_info:
        print("Skipping association with Load Balancer ")
        return

    load_balancer_name = instance_info['load_balancer']
    elb_conn = ELBConnection()

    # Deregister first so a stale entry never shadows the fresh one.
    if _instance_in_load_balancer(elb_conn, load_balancer_name, instance.id):
        print("Removing Instance(s) " + str([instance.id]) + " from load balancer " + load_balancer_name)
        elb_conn.deregister_instances(load_balancer_name, [instance.id])

    print("Adding Instance(s) " + str([instance.id]) + " to load balancer " + load_balancer_name)
    elb_conn.register_instances(load_balancer_name, [instance.id])
# Example #14
 def __init__(self,PREFIX='tfound-',ENV='dev',AMI='',TYPE='',SIZE='',
              DOMAIN='tfound',SSHKEY='myprivatekey',AWSKEY='',AWSSECRET='',AVAIL_ZONES=["us-east-1a","us-east-1b","us-east-1c","us-east-1d"]):
     '''
     Build naming state and open autoscale/ELB connections.

     NOTE(review): AVAIL_ZONES is a mutable default argument and is shared
     across calls; safe only if never mutated.

     Shows examples
     Create load balancer group 'tfound-dev-web-lb' for web servers, in dev group for tfound:
         python control-lb-and-groups.py --createlb --env dev --aws SC --type web
     Add an instance to the load balancer group:
         python control-lb-and-groups.py --addtolb=true --env dev --aws SC --type web --instance=i-999999
     Create launch config using ami ami-fa6b8393 (default), medium sized instance, and Autoscale Group 'tfound-dev-web-group' with a min of 2 instances, max 5, with health check on port 80:
         python control-lb-and-groups.py  --createlc --ami ami-fa6b8393 --size c1.medium --env dev --aws SC --type web --createag --min 2 --max 5
     Triggers/Health checks are hard coded to spawn new instances when total cpu reaches 60 percent or health check fails.
     '''
     # Naming scheme: <PREFIX><DOMAIN>-<ENV>-<TYPE>, e.g. 'tfound-tfound-dev-web'.
     self.PREFIX=PREFIX+DOMAIN+'-'+ENV+'-'+TYPE
     self.ENV=ENV
     self.AMI=AMI
     self.TYPE=TYPE
     self.DOMAIN=DOMAIN
     self.SIZE=SIZE
     # NOTE(review): MIN and MAX are neither parameters nor locals here --
     # these reads depend on module-level globals and raise NameError if
     # those are unset; confirm where MIN/MAX come from.
     self.MIN=MIN
     self.MAX=MAX
     self.SSHKEY=SSHKEY
     self.AWSKEY=AWSKEY
     self.AWSSECRET=AWSSECRET
     self.AVAIL_ZONES=AVAIL_ZONES
     # Derived resource names all share the computed prefix.
     self.LBNAME=self.PREFIX+'-lb'
     self.AGNAME=self.PREFIX+'-group'
     self.TRNAME=self.PREFIX+'-trigger'
     self.LCNAME=self.PREFIX+'-launch_config'
     self.asconn=AutoScaleConnection(self.AWSKEY, self.AWSSECRET)
     self.elbconn = ELBConnection(aws_access_key_id=AWSKEY,aws_secret_access_key=AWSSECRET)
     self.lc = self._buildLaunchConfig()
     self.ag = self._buildAutoscaleGroup()
# Example #15
 def setUp(self):
     """Create one named load balancer that every test can safely delete."""
     self.conn = ELBConnection()
     self.name = 'elb-boto-unit-test'
     self.availability_zones = ['us-east-1a']
     self.listeners = [(80, 8000, 'HTTP')]
     self.balancer = self.conn.create_load_balancer(
         self.name, self.availability_zones, self.listeners)
# Example #16
    def __init__(self, name):
        """Initialize the instance.

        Args:
            name (unicode):
                The name of the load balancer.
        """
        self._cnx = ELBConnection()
        self.name = name
 def __init__(self, clc_host, access_id, secret_key, token):
     """Open an ELBConnection against a Eucalyptus CLC or AWS endpoint.

     boto releases before 2.6 do not accept ``validate_certs``, so that
     keyword is only passed when the installed version supports it.

     Bug fix: the original compared version strings lexicographically
     (``boto.__version__ < '2.6'``), which wrongly classifies double-digit
     releases ('2.10' < '2.6' is True). Numeric components are compared
     instead.
     """
     #boto.set_stream_logger('foo')
     path='/services/elb'
     port=8773
     # Real AWS endpoint: switch to the ELB hostname, default path, 443.
     if clc_host.endswith('amazonaws.com'):
         clc_host = clc_host.replace('ec2', 'elasticloadbalancing', 1)
         path = '/'
         port=443
     reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
     # Leading numeric components of the version, e.g. '2.10.3' -> (2, 10).
     version = tuple(int(part) for part in
                     boto.__version__.split('.')[:2] if part.isdigit())
     if version < (2, 6):
         self.conn = ELBConnection(access_id, secret_key, region=reg,
                               port=port, path=path,
                               is_secure=True, security_token=token, debug=0)
     else:
         self.conn = ELBConnection(access_id, secret_key, region=reg,
                               port=port, path=path, validate_certs=False,
                               is_secure=True, security_token=token, debug=0)
     self.conn.http_connection_kwargs['timeout'] = 30
# Example #18
    def test_other_policy(self):
        """Policies parsed from DESCRIBE_RESPONSE are exposed on the LB."""
        response = mock.Mock()
        response.status = 200
        response.read.return_value = DESCRIBE_RESPONSE
        connection = ELBConnection(aws_access_key_id="aws_access_key_id", aws_secret_access_key="aws_secret_access_key")
        connection.make_request = mock.Mock(return_value=response)
        balancers = connection.get_all_load_balancers()
        self.assertEqual(len(balancers), 1)

        balancer = balancers[0]
        other = balancer.policies.other_policies
        self.assertEqual(len(other), 2)
        self.assertEqual(
            other[0].policy_name, "AWSConsole-SSLNegotiationPolicy-my-test-loadbalancer"
        )
        self.assertEqual(other[1].policy_name, "EnableProxyProtocol")

        backends = balancer.backends
        self.assertEqual(len(backends), 1)
        self.assertEqual(len(backends[0].policies), 1)
        self.assertEqual(backends[0].policies[0].policy_name, "EnableProxyProtocol")
        self.assertEqual(backends[0].instance_port, 80)
# Example #19
def get_api_elb(stack=None):
    """Return the ELB fronting *stack*'s API servers, or None.

    Bug fixes: the original id generator iterated over itself
    (``for instance in api_ids``) instead of ``api_nodes``, and the
    membership test referenced an undefined ``instance_ids`` -- both were
    NameErrors. The ids are now collected into a set so the membership
    test is O(1) and can be evaluated repeatedly (a generator would be
    exhausted after the first test).
    """
    if not is_ec2():
        return None

    stack = get_stack(stack)
    api_nodes = filter(lambda n: "apiServer" in n.roles, stack.nodes)
    api_ids = set(instance.instance_id for instance in api_nodes)

    # get all ELBs
    conn = ELBConnection(keys.aws.AWS_ACCESS_KEY_ID, keys.aws.AWS_SECRET_KEY)
    elbs = conn.get_all_load_balancers()

    # attempt to find the ELB belonging to this stack's set of API servers
    for elb in elbs:
        # TODO: this is deprecated;
        for awsInstance in elb.instances:
            if awsInstance.id in api_ids:
                return elb

    return None
# Example #20
    def _get_elb(self, instance_or_instances):
        """Return the first ELB containing any of the given instance(s).

        TODO: this function doesn't account for the case where an instance
        may belong to multiple ELBs. NOTE that this scenario will never
        arise in our current stack architecture, but I'm leaving this
        note in here just in case that assumption changes in the future.
        """
        if not isinstance(instance_or_instances, (list, tuple, set)):
            instances = [instance_or_instances]
        else:
            instances = instance_or_instances

        # Collect the ids once so the scan below is a plain membership test.
        wanted_ids = set(instance.instance_id for instance in instances)

        # get all ELBs
        conn = ELBConnection(AWSDeploymentPlatform.AWS_ACCESS_KEY_ID, AWSDeploymentPlatform.AWS_SECRET_KEY)

        # attempt to find the ELB belonging to this stack's set of API servers
        for elb in conn.get_all_load_balancers():
            for awsInstance in elb.instances:
                if awsInstance.id in wanted_ids:
                    return elb

        return None
# Example #21
 def __init__(self, args):
     """Set up the AWS connections and state used during auto scaling."""
     self.configs = ConfigParser.RawConfigParser()
     self.args = args
     self.test_props = {}
     self.props = {}
     # One connection per AWS service, all sharing the supplied key pair.
     self.ec2_connection = EC2Connection(self.args.access_key, self.args.secret_key)
     self.autoscale_connection = AutoScaleConnection(self.args.access_key, self.args.secret_key)
     self.elb_connection = ELBConnection(self.args.access_key, self.args.secret_key)
     self.cw_connection = CloudWatchConnection(self.args.access_key, self.args.secret_key)
     # Populated later by the scaling workflow.
     self.firstInstance = None
     self.launchConfiguration = None
     self.healthCheck = None
# Example #22
 def __init__(self, clc_host, access_id, secret_key, token):
     """Connect to the LoadBalancing service on a CLC or AWS endpoint."""
     # boto.set_stream_logger('foo')
     path = "/services/LoadBalancing"
     port = 8773
     # Real AWS: swap in the ELB hostname and use standard HTTPS.
     if clc_host.endswith("amazonaws.com"):
         clc_host = clc_host.replace("ec2", "elasticloadbalancing", 1)
         path = "/"
         port = 443
     reg = RegionInfo(name="eucalyptus", endpoint=clc_host)
     self.conn = ELBConnection(
         access_id, secret_key, region=reg, port=port, path=path, is_secure=True, security_token=token, debug=0
     )
     # Private-cloud endpoints ship self-signed certs; skip validation.
     self.conn.https_validate_certificates = False
     self.conn.http_connection_kwargs["timeout"] = 30
 def set_endpoint(self, endpoint):
     """(Re)build self.conn against *endpoint*, adapting path, port and
     region for a real AWS hostname."""
     #boto.set_stream_logger('foo')
     path = '/services/LoadBalancing'
     port = 8773
     reg = RegionInfo(name='eucalyptus', endpoint=endpoint)
     if endpoint.endswith('amazonaws.com'):
         # Real AWS: ELB hostname, default path, port 443, unnamed region.
         endpoint = endpoint.replace('ec2', 'elasticloadbalancing', 1)
         path = '/'
         port = 443
         reg = RegionInfo(endpoint=endpoint)
     self.conn = ELBConnection(self.access_id, self.secret_key, region=reg,
                               port=port, path=path,
                               is_secure=True, security_token=self.token, debug=0)
     # Self-signed certs on private clouds; validation stays off.
     self.conn.https_validate_certificates = False
     self.conn.http_connection_kwargs['timeout'] = 30
# Example #24
    def setUp(self):
        """Create a named load balancer plus an S3 bucket for the
        access-log tests; both are cleaned up afterwards."""
        self.conn = ELBConnection()
        self.name = 'elb-boto-unit-test'
        self.availability_zones = ['us-east-1a']
        self.listeners = [(80, 8000, 'HTTP')]
        self.balancer = self.conn.create_load_balancer(
            self.name, self.availability_zones, self.listeners)

        # S3 bucket for log tests; the timestamp keeps its name unique.
        self.s3 = boto.connect_s3()
        self.timestamp = str(int(time.time()))
        self.bucket_name = 'boto-elb-%s' % self.timestamp
        self.bucket = self.s3.create_bucket(self.bucket_name)
        self.bucket.set_canned_acl('public-read-write')
        self.addCleanup(self.cleanup_bucket, self.bucket)
# Example #25
class LoadBalancers(object):
  """Lazy collection of every ELB in the account, plus a CloudWatch
  connection for metric access. Python 2 code (uses ``unicode``)."""
  def __init__(self, parent):
    self.parent = parent
    self.conn = ELBConnection()
    # Cached LoadBalancer wrappers; empty until __call__ refreshes them.
    self.lbs = []
    self.cw_conn = CloudWatchConnection()

  def __call__(self):
    # Refresh the cache from AWS; returns self so calls can be chained.
    self.lbs = [LoadBalancer(self, lb) for lb in self.conn.get_all_load_balancers()]
    return self

  def status(self):
    # Delegate to each wrapped balancer's status().
    for lb in self.lbs:
      lb.status()

  def __getitem__(self, k):
    # String key: look up by balancer name; .pop() raises IndexError when
    # nothing matches. Any other key is treated as a list index/slice.
    if isinstance(k, str) or isinstance(k, unicode):
      return [lb for lb in self.lbs if lb.lb.name == k].pop()
    else:
      return self.lbs[k]

  def __repr__(self):
    return 'LoadBalancers:' + ",".join([lb.name for lb in self.lbs])
# Example #26
    def test_delete_load_balancer_listeners(self):
        """Deleting one listener should leave the others untouched."""
        connection = ELBConnection()
        name = 'elb-boto-unit-test'
        zones = ['us-east-1a']
        listeners = [(80, 8000, 'HTTP', 'HTTP', None), (443, 8001, 'HTTP', 'HTTP', None)]
        connection.create_load_balancer(name, zones, listeners)

        fetched = connection.get_all_load_balancers()
        self.assertEqual([lb.name for lb in fetched], [name])
        self.assertEqual(
            [listener.get_tuple() for listener in fetched[0].listeners], listeners)

        # Drop the 443 listener and confirm only the first one survives.
        connection.delete_load_balancer_listeners(name, [443])
        fetched = connection.get_all_load_balancers()
        self.assertEqual([lb.name for lb in fetched], [name])
        self.assertEqual(
            [listener.get_tuple() for listener in fetched[0].listeners],
            listeners[:1]
            )
# Example #27
				placement=ZONE, security_groups=[LGSG_NAME])
lg_instance = reservation.instances[0]
# NOTE(review): the fixed sleeps below paper over AWS eventual consistency;
# the update() poll is what actually gates on the instance running.
time.sleep(10)
# wait for load generator to run
while not lg_instance.update() == 'running':
	time.sleep(3)
time.sleep(5)
# add tag
lg_instance.add_tag(TAGK, TAGV)
time.sleep(5)
print lg_instance.id
print lg_instance.dns_name
print lg_instance.tags
print 'Creating ELB'
# initialize elastic load balancer
conn2 = ELBConnection(os.environ['AWS_ACCESS_KEY_ID'], os.environ['AWS_SECRET_KEY'])
# set heartbeat health-check target against the load generator's DNS name
page = 'HTTP:80' + '/heartbeat?lg=' + lg_instance.dns_name
hc = HealthCheck(interval=20, healthy_threshold=3, unhealthy_threshold=5, target=page)
# set port 80
elb = conn2.create_load_balancer('elb', [ZONE], [(80, 80, 'http')])
# allow all traffic
conn2.apply_security_groups_to_lb('elb', [sg.id])
conn2.configure_health_check('elb', hc)
print elb.dns_name
print 'Creating ASG'
# initialize launch config
conn3 = AutoScaleConnection(os.environ['AWS_ACCESS_KEY_ID'], os.environ['AWS_SECRET_KEY'])
config = LaunchConfiguration(name='config', image_id=DC_IMAGE, security_groups=sgs,
							 instance_type=TYPE, instance_monitoring=True)
conn3.create_launch_configuration(config)
class BotoBalanceInterface(BalanceInterface):
    """Balance-interface implementation backed by a boto ELBConnection.

    Most methods are thin pass-throughs; get_all_load_balancers() builds
    and parses the DescribeLoadBalancers request by hand so the raw
    response body can be logged and optionally captured as mock data.
    """
    # Shared ELB connection, created via set_endpoint().
    conn = None
    # When True, responses are also dumped to mockdata/*.json files.
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        self.access_id = access_id
        self.secret_key = secret_key
        self.token = token
        self.set_endpoint(clc_host)

    def set_endpoint(self, endpoint):
        """Point self.conn at *endpoint*, adapting path/port/region for AWS."""
        #boto.set_stream_logger('foo')
        reg = RegionInfo(name='eucalyptus', endpoint=endpoint)
        path = '/services/LoadBalancing'
        port = 8773
        if endpoint[len(endpoint)-13:] == 'amazonaws.com':
            # Real AWS: use the ELB hostname, default path and HTTPS port.
            endpoint = endpoint.replace('ec2', 'elasticloadbalancing', 1)
            path = '/'
            reg = RegionInfo(endpoint=endpoint)
            port = 443
        self.conn = ELBConnection(self.access_id, self.secret_key, region=reg,
                                  port=port, path=path,
                                  is_secure=True, security_token=self.token, debug=0)
        # Private-cloud endpoints ship self-signed certs; skip validation.
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        """Serialize *obj* to file *name* with the project's encoder."""
        f = open(name, 'w')
        json.dump(obj, f, cls=BotoJsonBalanceEncoder, indent=2)
        f.close()

    def create_load_balancer(self, name, zones, listeners, subnets=None,
                             security_groups=None, scheme='internet-facing'):
        """Pass-through to ELBConnection.create_load_balancer."""
        return self.conn.create_load_balancer(name, zones, listeners, subnets, security_groups, scheme)

    def delete_load_balancer(self, name):
        """Pass-through to ELBConnection.delete_load_balancer."""
        return self.conn.delete_load_balancer(name)

    def get_all_load_balancers(self, load_balancer_names=None):
        """Describe load balancers, parsing the raw XML response ourselves.

        Raises the connection's ResponseError on an empty body or a
        non-200 status.
        """
        params = {}
        if load_balancer_names:
            # Bug fix: build_list_params lives on the boto connection, not
            # on this wrapper; calling it on self raised AttributeError.
            self.conn.build_list_params(params, load_balancer_names,
                                        'LoadBalancerNames.member.%d')
        http_request = self.conn.build_base_http_request('GET', '/', None,
                                                         params, {}, '',
                                                         self.conn.server_name())
        http_request.params['Action'] = 'DescribeLoadBalancers'
        http_request.params['Version'] = self.conn.APIVersion
        response = self.conn._mexe(http_request, override_num_retries=2)
        body = response.read()
        boto.log.debug(body)
        if not body:
            boto.log.error('Null body %s' % body)
            raise self.conn.ResponseError(response.status, response.reason, body)
        elif response.status == 200:
            import xml.sax
            obj = boto.resultset.ResultSet([('member', boto.ec2.elb.loadbalancer.LoadBalancer)])
            h = boto.handler.XmlHandler(obj, self.conn)
            xml.sax.parseString(body, h)
            if self.saveclcdata:
                self.__save_json__(obj, "mockdata/ELB_Balancers.json")
            return obj
        else:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.conn.ResponseError(response.status, response.reason, body)

    def deregister_instances(self, load_balancer_name, instances):
        """Pass-through to ELBConnection.deregister_instances."""
        return self.conn.deregister_instances(load_balancer_name, instances)

    def register_instances(self, load_balancer_name, instances):
        """Pass-through to ELBConnection.register_instances."""
        return self.conn.register_instances(load_balancer_name, instances)

    def create_load_balancer_listeners(self, name, listeners):
        """Pass-through to ELBConnection.create_load_balancer_listeners."""
        return self.conn.create_load_balancer_listeners(name, listeners)

    def delete_load_balancer_listeners(self, name, ports):
        """Pass-through to ELBConnection.delete_load_balancer_listeners."""
        return self.conn.delete_load_balancer_listeners(name, ports)

    def configure_health_check(self, name, health_check):
        """Pass-through to ELBConnection.configure_health_check."""
        return self.conn.configure_health_check(name, health_check)

    def describe_instance_health(self, load_balancer_name, instances=None):
        """Describe instance health, optionally capturing mock data."""
        obj = self.conn.describe_instance_health(load_balancer_name, instances)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/ELB_Instances.json")
        return obj
# Example #29
'instance_type': 't1.micro', #The size of instance that will be launched
'instance_monitoring': True #Indicated whether the instances will be launched with detailed monitoring enabled. Needed to enable CloudWatch
}
 
##############################END CONFIGURATION#######################################
 
#=================Construct a list of all availability zones for your region=========
conn_reg = boto.ec2.connect_to_region(region_name=region)
zones = conn_reg.get_all_zones()

# Fixed: the original append loop had lost its body indentation (a syntax
# error in the pasted code); a comprehension builds the zone-name list the
# ELB/ASG calls below expect.
zoneStrings = [zone.name for zone in zones]

conn_elb = ELBConnection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
conn_as = AutoScaleConnection(AWS_ACCESS_KEY, AWS_SECRET_KEY)

#=================Create a Load Balancer=============================================
#For a complete list of options see http://boto.cloudhackers.com/ref/ec2.html#module-boto.ec2.elb.healthcheck
hc = HealthCheck('healthCheck',
                 interval=elastic_load_balancer['interval'],
                 target=elastic_load_balancer['health_check_target'],
                 timeout=elastic_load_balancer['timeout'])

#For a complete list of options see http://boto.cloudhackers.com/ref/ec2.html#boto.ec2.elb.ELBConnection.create_load_balancer
lb = conn_elb.create_load_balancer(elastic_load_balancer['name'],
                                   zoneStrings,
                                   elastic_load_balancer['connection_forwarding'])

lb.configure_health_check(hc)
# Example #30
class ELBConnectionTest(unittest.TestCase):
    """Integration tests for ELBConnection against a live account: each
    test runs with a freshly created load balancer."""
    ec2 = True

    def setUp(self):
        """Creates a named load balancer that can be safely
        deleted at the end of each test"""
        self.conn = ELBConnection()
        self.name = 'elb-boto-unit-test'
        self.availability_zones = ['us-east-1a']
        self.listeners = [(80, 8000, 'HTTP')]
        self.balancer = self.conn.create_load_balancer(self.name, self.availability_zones, self.listeners)

    def tearDown(self):
        """ Deletes the test load balancer after every test.
        It does not delete EVERY load balancer in your account"""
        self.balancer.delete()

    def test_build_list_params(self):
        # Lists expand into numbered query parameters.
        params = {}
        self.conn.build_list_params(
            params, ['thing1', 'thing2', 'thing3'], 'ThingName%d')
        expected_params = {
            'ThingName1': 'thing1',
            'ThingName2': 'thing2',
            'ThingName3': 'thing3'
            }
        self.assertEqual(params, expected_params)

    # TODO: for these next tests, consider sleeping until our load
    # balancer comes up, then testing for connectivity to
    # balancer.dns_name, along the lines of the existing EC2 unit tests.

    def test_create_load_balancer(self):
        # The created balancer round-trips its settings.
        self.assertEqual(self.balancer.name, self.name)
        self.assertEqual(self.balancer.availability_zones,\
            self.availability_zones)
        self.assertEqual(self.balancer.listeners, self.listeners)

        balancers = self.conn.get_all_load_balancers()
        self.assertEqual([lb.name for lb in balancers], [self.name])

    def test_create_load_balancer_listeners(self):
        # Listeners added after creation appear alongside the originals.
        more_listeners = [(443, 8001, 'HTTP')]
        self.conn.create_load_balancer_listeners(self.name, more_listeners)
        balancers = self.conn.get_all_load_balancers()
        self.assertEqual([lb.name for lb in balancers], [self.name])
        self.assertEqual(
            sorted(l.get_tuple() for l in balancers[0].listeners),
            sorted(self.listeners + more_listeners)
            )

    def test_delete_load_balancer_listeners(self):
        # Uses a second, dedicated balancer so setUp's one stays intact.
        mod_listeners = [(80, 8000, 'HTTP'), (443, 8001, 'HTTP')]
        mod_name = self.name + "-mod"
        self.mod_balancer = self.conn.create_load_balancer(mod_name,\
            self.availability_zones, mod_listeners)

        mod_balancers = self.conn.get_all_load_balancers(load_balancer_names=[mod_name])
        self.assertEqual([lb.name for lb in mod_balancers], [mod_name])
        self.assertEqual(
            sorted([l.get_tuple() for l in mod_balancers[0].listeners]),
            sorted(mod_listeners))

        self.conn.delete_load_balancer_listeners(self.mod_balancer.name, [443])
        mod_balancers = self.conn.get_all_load_balancers(load_balancer_names=[mod_name])
        self.assertEqual([lb.name for lb in mod_balancers], [mod_name])
        self.assertEqual([l.get_tuple() for l in mod_balancers[0].listeners],
                         mod_listeners[:1])
        self.mod_balancer.delete()

    def test_create_load_balancer_listeners_with_policies(self):
        # Attach LB-cookie and app-cookie stickiness policies to listeners.
        more_listeners = [(443, 8001, 'HTTP')]
        self.conn.create_load_balancer_listeners(self.name, more_listeners)

        lb_policy_name = 'lb-policy'
        self.conn.create_lb_cookie_stickiness_policy(1000, self.name, lb_policy_name)
        self.conn.set_lb_policies_of_listener(self.name, self.listeners[0][0], lb_policy_name)

        app_policy_name = 'app-policy'
        self.conn.create_app_cookie_stickiness_policy('appcookie', self.name, app_policy_name)
        self.conn.set_lb_policies_of_listener(self.name, more_listeners[0][0], app_policy_name)

        balancers = self.conn.get_all_load_balancers(load_balancer_names=[self.name])
        self.assertEqual([lb.name for lb in balancers], [self.name])
        self.assertEqual(
            sorted(l.get_tuple() for l in balancers[0].listeners),
            sorted(self.listeners + more_listeners)
            )
        # Policy names should be checked here once they are supported
        # in the Listener object.

    def test_create_load_balancer_complex_listeners(self):
        # 4-tuple listeners carry both the LB and the instance protocol.
        complex_listeners = [
            (8080, 80, 'HTTP', 'HTTP'),
            (2525, 25, 'TCP', 'TCP'),
        ]

        self.conn.create_load_balancer_listeners(
            self.name,
            complex_listeners=complex_listeners
        )

        balancers = self.conn.get_all_load_balancers(
            load_balancer_names=[self.name]
        )
        self.assertEqual([lb.name for lb in balancers], [self.name])
        self.assertEqual(
            sorted(l.get_complex_tuple() for l in balancers[0].listeners),
            # We need an extra 'HTTP' here over what ``self.listeners`` uses.
            sorted([(80, 8000, 'HTTP', 'HTTP')] + complex_listeners)
        )
# Example #31
 def __init__(self, parent):
   """Hold the parent reference plus ELB and CloudWatch connections."""
   self.parent = parent
   self.lbs = []
   self.conn = ELBConnection()
   self.cw_conn = CloudWatchConnection()
# Example #32
class ELBConnectionTest(unittest.TestCase):
    """Integration tests for ELBConnection against a live account."""

    ec2 = True

    def setUp(self):
        """Create one throwaway load balancer for each test to exercise."""
        self.conn = ELBConnection()
        self.name = 'elb-boto-unit-test'
        self.availability_zones = ['us-east-1a']
        self.listeners = [(80, 8000, 'HTTP')]
        self.balancer = self.conn.create_load_balancer(
            self.name, self.availability_zones, self.listeners)

    def tearDown(self):
        """Delete only the balancer created in setUp, nothing else."""
        self.balancer.delete()

    def test_build_list_params(self):
        """build_list_params expands a list into numbered request params."""
        params = {}
        self.conn.build_list_params(
            params, ['thing1', 'thing2', 'thing3'], 'ThingName%d')
        self.assertEqual(params, {
            'ThingName1': 'thing1',
            'ThingName2': 'thing2',
            'ThingName3': 'thing3',
        })

    # TODO: for these next tests, consider sleeping until our load
    # balancer comes up, then testing for connectivity to
    # balancer.dns_name, along the lines of the existing EC2 unit tests.

    def test_create_load_balancer(self):
        """The balancer from setUp reflects its name, zones and listeners."""
        self.assertEqual(self.balancer.name, self.name)
        self.assertEqual(self.balancer.availability_zones,
                         self.availability_zones)
        self.assertEqual(self.balancer.listeners, self.listeners)

        found = self.conn.get_all_load_balancers()
        self.assertEqual([lb.name for lb in found], [self.name])

    def test_create_load_balancer_listeners(self):
        """Extra listeners added after creation are reported back."""
        extra_listeners = [(443, 8001, 'HTTP')]
        self.conn.create_load_balancer_listeners(self.name, extra_listeners)
        found = self.conn.get_all_load_balancers()
        self.assertEqual([lb.name for lb in found], [self.name])
        self.assertEqual(
            sorted([listener.get_tuple() for listener in found[0].listeners]),
            sorted(self.listeners + extra_listeners))

    def test_delete_load_balancer_listeners(self):
        """Deleting a port removes just that listener from a balancer."""
        mod_listeners = [(80, 8000, 'HTTP'), (443, 8001, 'HTTP')]
        mod_name = self.name + "-mod"
        self.mod_balancer = self.conn.create_load_balancer(
            mod_name, self.availability_zones, mod_listeners)

        found = self.conn.get_all_load_balancers(load_balancer_names=[mod_name])
        self.assertEqual([lb.name for lb in found], [mod_name])
        self.assertEqual(
            sorted([listener.get_tuple() for listener in found[0].listeners]),
            sorted(mod_listeners))

        self.conn.delete_load_balancer_listeners(self.mod_balancer.name, [443])
        found = self.conn.get_all_load_balancers(load_balancer_names=[mod_name])
        self.assertEqual([lb.name for lb in found], [mod_name])
        self.assertEqual(
            [listener.get_tuple() for listener in found[0].listeners],
            mod_listeners[:1])
        self.mod_balancer.delete()

    def test_create_load_balancer_listeners_with_policies(self):
        """Attach LB-cookie and app-cookie stickiness policies to listeners."""
        extra_listeners = [(443, 8001, 'HTTP')]
        self.conn.create_load_balancer_listeners(self.name, extra_listeners)

        lb_policy = 'lb-policy'
        self.conn.create_lb_cookie_stickiness_policy(1000, self.name, lb_policy)
        self.conn.set_lb_policies_of_listener(self.name, self.listeners[0][0], lb_policy)

        app_policy = 'app-policy'
        self.conn.create_app_cookie_stickiness_policy('appcookie', self.name, app_policy)
        self.conn.set_lb_policies_of_listener(self.name, extra_listeners[0][0], app_policy)

        found = self.conn.get_all_load_balancers(load_balancer_names=[self.name])
        self.assertEqual([lb.name for lb in found], [self.name])
        self.assertEqual(
            sorted([listener.get_tuple() for listener in found[0].listeners]),
            sorted(self.listeners + extra_listeners))
        # Policy names should be checked here once they are supported
        # in the Listener object.

    def test_create_load_balancer_backend_with_policies(self):
        """Backend-server policies can be attached and later cleared."""
        policy = 'enable-proxy-protocol'
        backend_port = 8081
        self.conn.create_lb_policy(self.name, policy,
                                   'ProxyProtocolPolicyType', {'ProxyProtocol': True})
        self.conn.set_lb_policies_of_backend_server(self.name, backend_port, [policy])

        found = self.conn.get_all_load_balancers(load_balancer_names=[self.name])
        self.assertEqual([lb.name for lb in found], [self.name])
        self.assertEqual(len(found[0].policies.other_policies), 1)
        self.assertEqual(found[0].policies.other_policies[0].policy_name, policy)
        self.assertEqual(len(found[0].backends), 1)
        self.assertEqual(found[0].backends[0].instance_port, backend_port)
        self.assertEqual(found[0].backends[0].policies[0].policy_name, policy)

        # An empty policy list detaches every policy from the backend port.
        self.conn.set_lb_policies_of_backend_server(self.name, backend_port, [])

        found = self.conn.get_all_load_balancers(load_balancer_names=[self.name])
        self.assertEqual([lb.name for lb in found], [self.name])
        self.assertEqual(len(found[0].policies.other_policies), 1)
        self.assertEqual(len(found[0].backends), 0)

    def test_create_load_balancer_complex_listeners(self):
        """Listeners given as 4-tuples (with instance protocol) round-trip."""
        complex_listeners = [
            (8080, 80, 'HTTP', 'HTTP'),
            (2525, 25, 'TCP', 'TCP'),
        ]
        self.conn.create_load_balancer_listeners(
            self.name, complex_listeners=complex_listeners)

        found = self.conn.get_all_load_balancers(load_balancer_names=[self.name])
        self.assertEqual([lb.name for lb in found], [self.name])
        # setUp's (80, 8000, 'HTTP') listener gains an explicit instance
        # protocol when reported back as a complex tuple.
        self.assertEqual(
            sorted([listener.get_complex_tuple() for listener in found[0].listeners]),
            sorted([(80, 8000, 'HTTP', 'HTTP')] + complex_listeners))
Example #33
0
class BotoBalanceInterface(BalanceInterface):
    """Boto-backed implementation of BalanceInterface for ELB operations.

    Talks either to a Eucalyptus cloud controller or, when the endpoint is
    an amazonaws.com host, directly to AWS Elastic Load Balancing.
    """

    # Lazily (re)built ELBConnection; created by set_endpoint().
    conn = None
    # When True, live responses are also dumped to mockdata/*.json files.
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        """Store credentials and connect to *clc_host*."""
        self.access_id = access_id
        self.secret_key = secret_key
        self.token = token
        self.set_endpoint(clc_host)

    def set_endpoint(self, endpoint):
        """(Re)connect to *endpoint*, adjusting region, path and port for AWS.

        Eucalyptus endpoints use the CLC service path on port 8773; AWS
        endpoints are rewritten from the EC2 hostname to the ELB one.
        """
        #boto.set_stream_logger('foo')
        reg = RegionInfo(name='eucalyptus', endpoint=endpoint)
        path = '/services/LoadBalancing'
        port = 8773
        # endswith() replaces the original hand-rolled slice comparison
        # (endpoint[len(endpoint) - 13:] == 'amazonaws.com') — equivalent
        # behavior, clearer intent.
        if endpoint.endswith('amazonaws.com'):
            endpoint = endpoint.replace('ec2', 'elasticloadbalancing', 1)
            path = '/'
            reg = RegionInfo(endpoint=endpoint)
            port = 443
        self.conn = ELBConnection(self.access_id,
                                  self.secret_key,
                                  region=reg,
                                  port=port,
                                  path=path,
                                  is_secure=True,
                                  security_token=self.token,
                                  debug=0)
        # Eucalyptus endpoints commonly present self-signed certificates.
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        """Serialize *obj* to file *name* as JSON (mock-data capture)."""
        # 'with' guarantees the handle is closed even if json.dump raises;
        # the previous open()/close() pair leaked the handle on error.
        with open(name, 'w') as f:
            json.dump(obj, f, cls=BotoJsonBalanceEncoder, indent=2)

    def create_load_balancer(self,
                             name,
                             zones,
                             listeners,
                             subnets=None,
                             security_groups=None,
                             scheme='internet-facing'):
        """Create a load balancer; thin pass-through to boto."""
        return self.conn.create_load_balancer(name, zones, listeners, subnets,
                                              security_groups, scheme)

    def delete_load_balancer(self, name):
        """Delete the named load balancer."""
        return self.conn.delete_load_balancer(name)

    def get_all_load_balancers(self, load_balancer_names=None):
        """Describe load balancers via a hand-built request.

        Builds the DescribeLoadBalancers call manually so the raw XML body
        can be parsed (and optionally captured as mock data) instead of
        relying on boto's stock wrapper.
        """
        params = {}
        if load_balancer_names:
            # NOTE(review): build_list_params is called on self, not
            # self.conn — confirm BalanceInterface actually provides it.
            self.build_list_params(params, load_balancer_names,
                                   'LoadBalancerNames.member.%d')
        http_request = self.conn.build_base_http_request(
            'GET', '/', None, params, {}, '', self.conn.server_name())
        http_request.params['Action'] = 'DescribeLoadBalancers'
        http_request.params['Version'] = self.conn.APIVersion
        response = self.conn._mexe(http_request, override_num_retries=2)
        body = response.read()
        boto.log.debug(body)
        if not body:
            boto.log.error('Null body %s' % body)
            raise self.conn.ResponseError(response.status, response.reason,
                                          body)
        elif response.status == 200:
            obj = boto.resultset.ResultSet([
                ('member', boto.ec2.elb.loadbalancer.LoadBalancer)
            ])
            h = boto.handler.XmlHandler(obj, self.conn)
            import xml.sax

            xml.sax.parseString(body, h)
            if self.saveclcdata:
                self.__save_json__(obj, "mockdata/ELB_Balancers.json")
            return obj
        else:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.conn.ResponseError(response.status, response.reason,
                                          body)

    def deregister_instances(self, load_balancer_name, instances):
        """Remove *instances* from the named balancer."""
        return self.conn.deregister_instances(load_balancer_name, instances)

    def register_instances(self, load_balancer_name, instances):
        """Add *instances* to the named balancer."""
        return self.conn.register_instances(load_balancer_name, instances)

    def create_load_balancer_listeners(self, name, listeners):
        """Attach *listeners* to the named balancer."""
        return self.conn.create_load_balancer_listeners(name, listeners)

    def delete_load_balancer_listeners(self, name, ports):
        """Remove the listeners on *ports* from the named balancer."""
        return self.conn.delete_load_balancer_listeners(name, ports)

    def configure_health_check(self, name, health_check):
        """Install *health_check* on the named balancer."""
        return self.conn.configure_health_check(name, health_check)

    def describe_instance_health(self, load_balancer_name, instances=None):
        """Report per-instance health, optionally capturing mock data."""
        obj = self.conn.describe_instance_health(load_balancer_name, instances)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/ELB_Instances.json")
        return obj
Example #34
0
    'statistic': 'Average',
    'upThreshold': 15,
    'downThreshold': 10,
    'period': 60,
    'evaluationPeriods': 1
}

######################### end parameter block ################################

######################### begin configuration ################################
# make the connections
conn_ec2 = boto.ec2.connect_to_region(regionName,
                                      aws_access_key_id=AWS_ACCESS_KEY,
                                      aws_secret_access_key=AWS_SECRET_KEY)
conn_reg = boto.ec2.elb.connect_to_region(regionName)
conn_elb = ELBConnection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
conn_as = AutoScaleConnection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
# BUGFIX: conn_cw was previously assigned twice back to back; the first,
# credential-less CloudWatch connection was built and immediately
# discarded. Keep only the explicitly-credentialed one.
conn_cw = boto.ec2.cloudwatch.connect_to_region(
    regionName,
    aws_access_key_id=AWS_ACCESS_KEY,
    aws_secret_access_key=AWS_SECRET_KEY)

# ============================================== #
# configure a health check from the thresholds defined in the
# parameter block above
hc = HealthCheck(
    interval=elastic_load_balancer['interval'],
    healthy_threshold=elastic_load_balancer['healthyThreshold'],
    unhealthy_threshold=elastic_load_balancer['unhealthyThreshold'],
    target=elastic_load_balancer['healthCheckTarget'])
# create a load balancer
Example #35
0
 def __init__(self, aws_access_key, aws_secret_key):
     """Open EC2, ELB, AutoScale and CloudWatch connections with one key pair.

     :param aws_access_key: AWS access key ID used for all four services.
     :param aws_secret_key: matching AWS secret access key.
     """
     self.ec2_conn = EC2Connection(aws_access_key, aws_secret_key)
     self.elb_conn = ELBConnection(aws_access_key, aws_secret_key)
     self.auto_scale_conn = AutoScaleConnection(aws_access_key, aws_secret_key)
     self.cloud_watch_conn = CloudWatchConnection(aws_access_key, aws_secret_key)
     self.default_cooldown = 60  # seconds between scaling activities
Example #36
0
 
#=================Construct a list of all availability zones for your region=========

# NOTE(review): this fragment is Python 2 (print statements) and relies on
# a module-level `region` defined earlier in the file.
get_reg = boto.ec2.connect_to_region(region_name=region )

print get_reg
zones = get_reg.get_all_zones()


# Collect just the zone names for later use when creating the balancer.
zoneStrings = []
for zone in zones:
    zoneStrings.append(zone.name)

print zoneStrings;

# No explicit credentials: boto falls back to environment/config resolution.
conn_elb = ELBConnection()
conn_as = AutoScaleConnection()

all_elb = conn_elb.get_all_load_balancers()



##=================Create a Load Balancer=============================================
#For a complete list of options see http://boto.cloudhackers.com/ref/ec2.html#module-boto.ec2.elb.healthcheck
hc = HealthCheck('healthCheck',
                     interval=elastic_load_balancer['interval'],
                     target=elastic_load_balancer['health_check_target'],
                     timeout=elastic_load_balancer['timeout'])
# 
##For a complete list of options see http://boto.cloudhackers.com/ref/ec2.html#boto.ec2.elb.ELBConnection.create_load_balancer
lb = conn_elb.create_load_balancer(elastic_load_balancer['name'],
Example #37
0
class ELBConnectionTest(unittest.TestCase):
    ec2 = True

    def setUp(self):
        """Creates a named load balancer that can be safely
        deleted at the end of each test"""
        self.conn = ELBConnection()
        self.name = 'elb-boto-unit-test'
        self.availability_zones = ['us-east-1a']
        self.listeners = [(80, 8000, 'HTTP')]
        self.balancer = self.conn.create_load_balancer(self.name,
                                                       self.availability_zones,
                                                       self.listeners)

        # S3 bucket for log tests
        self.s3 = boto.connect_s3()
        self.timestamp = str(int(time.time()))
        self.bucket_name = 'boto-elb-%s' % self.timestamp
        self.bucket = self.s3.create_bucket(self.bucket_name)
        self.bucket.set_canned_acl('public-read-write')
        self.addCleanup(self.cleanup_bucket, self.bucket)

    def cleanup_bucket(self, bucket):
        for key in bucket.get_all_keys():
            key.delete()
        bucket.delete()

    def tearDown(self):
        """ Deletes the test load balancer after every test.
        It does not delete EVERY load balancer in your account"""
        self.balancer.delete()

    def test_build_list_params(self):
        params = {}
        self.conn.build_list_params(params, ['thing1', 'thing2', 'thing3'],
                                    'ThingName%d')
        expected_params = {
            'ThingName1': 'thing1',
            'ThingName2': 'thing2',
            'ThingName3': 'thing3'
        }
        self.assertEqual(params, expected_params)

    # TODO: for these next tests, consider sleeping until our load
    # balancer comes up, then testing for connectivity to
    # balancer.dns_name, along the lines of the existing EC2 unit tests.

    def test_create_load_balancer(self):
        self.assertEqual(self.balancer.name, self.name)
        self.assertEqual(self.balancer.availability_zones,
                         self.availability_zones)
        self.assertEqual(self.balancer.listeners, self.listeners)

        balancers = self.conn.get_all_load_balancers()
        self.assertEqual([lb.name for lb in balancers], [self.name])

    def test_create_load_balancer_listeners(self):
        more_listeners = [(443, 8001, 'HTTP')]
        self.conn.create_load_balancer_listeners(self.name, more_listeners)
        balancers = self.conn.get_all_load_balancers()
        self.assertEqual([lb.name for lb in balancers], [self.name])
        self.assertEqual(sorted(l.get_tuple() for l in balancers[0].listeners),
                         sorted(self.listeners + more_listeners))

    def test_delete_load_balancer_listeners(self):
        mod_listeners = [(80, 8000, 'HTTP'), (443, 8001, 'HTTP')]
        mod_name = self.name + "-mod"
        self.mod_balancer = self.conn.create_load_balancer(
            mod_name, self.availability_zones, mod_listeners)

        mod_balancers = self.conn.get_all_load_balancers(
            load_balancer_names=[mod_name])
        self.assertEqual([lb.name for lb in mod_balancers], [mod_name])
        self.assertEqual(
            sorted([l.get_tuple() for l in mod_balancers[0].listeners]),
            sorted(mod_listeners))

        self.conn.delete_load_balancer_listeners(self.mod_balancer.name, [443])
        mod_balancers = self.conn.get_all_load_balancers(
            load_balancer_names=[mod_name])
        self.assertEqual([lb.name for lb in mod_balancers], [mod_name])
        self.assertEqual([l.get_tuple() for l in mod_balancers[0].listeners],
                         mod_listeners[:1])
        self.mod_balancer.delete()

    def test_create_load_balancer_listeners_with_policies(self):
        more_listeners = [(443, 8001, 'HTTP')]
        self.conn.create_load_balancer_listeners(self.name, more_listeners)

        lb_policy_name = 'lb-policy'
        self.conn.create_lb_cookie_stickiness_policy(1000, self.name,
                                                     lb_policy_name)
        self.conn.set_lb_policies_of_listener(self.name, self.listeners[0][0],
                                              lb_policy_name)

        app_policy_name = 'app-policy'
        self.conn.create_app_cookie_stickiness_policy('appcookie', self.name,
                                                      app_policy_name)
        self.conn.set_lb_policies_of_listener(self.name, more_listeners[0][0],
                                              app_policy_name)

        balancers = self.conn.get_all_load_balancers(
            load_balancer_names=[self.name])
        self.assertEqual([lb.name for lb in balancers], [self.name])
        self.assertEqual(sorted(l.get_tuple() for l in balancers[0].listeners),
                         sorted(self.listeners + more_listeners))
        # Policy names should be checked here once they are supported
        # in the Listener object.

    def test_create_load_balancer_backend_with_policies(self):
        other_policy_name = 'enable-proxy-protocol'
        backend_port = 8081
        self.conn.create_lb_policy(self.name, other_policy_name,
                                   'ProxyProtocolPolicyType',
                                   {'ProxyProtocol': True})
        self.conn.set_lb_policies_of_backend_server(self.name, backend_port,
                                                    [other_policy_name])

        balancers = self.conn.get_all_load_balancers(
            load_balancer_names=[self.name])
        self.assertEqual([lb.name for lb in balancers], [self.name])
        self.assertEqual(len(balancers[0].policies.other_policies), 1)
        self.assertEqual(balancers[0].policies.other_policies[0].policy_name,
                         other_policy_name)
        self.assertEqual(len(balancers[0].backends), 1)
        self.assertEqual(balancers[0].backends[0].instance_port, backend_port)
        self.assertEqual(balancers[0].backends[0].policies[0].policy_name,
                         other_policy_name)

        self.conn.set_lb_policies_of_backend_server(self.name, backend_port,
                                                    [])

        balancers = self.conn.get_all_load_balancers(
            load_balancer_names=[self.name])
        self.assertEqual([lb.name for lb in balancers], [self.name])
        self.assertEqual(len(balancers[0].policies.other_policies), 1)
        self.assertEqual(len(balancers[0].backends), 0)

    def test_create_load_balancer_complex_listeners(self):
        complex_listeners = [
            (8080, 80, 'HTTP', 'HTTP'),
            (2525, 25, 'TCP', 'TCP'),
        ]

        self.conn.create_load_balancer_listeners(
            self.name, complex_listeners=complex_listeners)

        balancers = self.conn.get_all_load_balancers(
            load_balancer_names=[self.name])
        self.assertEqual([lb.name for lb in balancers], [self.name])
        self.assertEqual(
            sorted(l.get_complex_tuple() for l in balancers[0].listeners),
            # We need an extra 'HTTP' here over what ``self.listeners`` uses.
            sorted([(80, 8000, 'HTTP', 'HTTP')] + complex_listeners))

    def test_load_balancer_access_log(self):
        attributes = self.balancer.get_attributes()

        self.assertEqual(False, attributes.access_log.enabled)

        attributes.access_log.enabled = True
        attributes.access_log.s3_bucket_name = self.bucket_name
        attributes.access_log.s3_bucket_prefix = 'access-logs'
        attributes.access_log.emit_interval = 5

        self.conn.modify_lb_attribute(self.balancer.name, 'accessLog',
                                      attributes.access_log)

        new_attributes = self.balancer.get_attributes()

        self.assertEqual(True, new_attributes.access_log.enabled)
        self.assertEqual(self.bucket_name,
                         new_attributes.access_log.s3_bucket_name)
        self.assertEqual('access-logs',
                         new_attributes.access_log.s3_bucket_prefix)
        self.assertEqual(5, new_attributes.access_log.emit_interval)

    def test_load_balancer_get_attributes(self):
        attributes = self.balancer.get_attributes()
        connection_draining = self.conn.get_lb_attribute(
            self.balancer.name, 'ConnectionDraining')
        self.assertEqual(connection_draining.enabled,
                         attributes.connection_draining.enabled)
        self.assertEqual(connection_draining.timeout,
                         attributes.connection_draining.timeout)

        access_log = self.conn.get_lb_attribute(self.balancer.name,
                                                'AccessLog')
        self.assertEqual(access_log.enabled, attributes.access_log.enabled)
        self.assertEqual(access_log.s3_bucket_name,
                         attributes.access_log.s3_bucket_name)
        self.assertEqual(access_log.s3_bucket_prefix,
                         attributes.access_log.s3_bucket_prefix)
        self.assertEqual(access_log.emit_interval,
                         attributes.access_log.emit_interval)

        cross_zone_load_balancing = self.conn.get_lb_attribute(
            self.balancer.name, 'CrossZoneLoadBalancing')
        self.assertEqual(cross_zone_load_balancing,
                         attributes.cross_zone_load_balancing.enabled)

    def change_and_verify_load_balancer_connection_draining(
            self, enabled, timeout=None):
        attributes = self.balancer.get_attributes()

        attributes.connection_draining.enabled = enabled
        if timeout is not None:
            attributes.connection_draining.timeout = timeout

        self.conn.modify_lb_attribute(self.balancer.name, 'ConnectionDraining',
                                      attributes.connection_draining)

        attributes = self.balancer.get_attributes()
        self.assertEqual(enabled, attributes.connection_draining.enabled)
        if timeout is not None:
            self.assertEqual(timeout, attributes.connection_draining.timeout)

    def test_load_balancer_connection_draining_config(self):
        self.change_and_verify_load_balancer_connection_draining(True, 128)
        self.change_and_verify_load_balancer_connection_draining(True, 256)
        self.change_and_verify_load_balancer_connection_draining(False)
        self.change_and_verify_load_balancer_connection_draining(True, 64)

    def test_set_load_balancer_policies_of_listeners(self):
        more_listeners = [(443, 8001, 'HTTP')]
        self.conn.create_load_balancer_listeners(self.name, more_listeners)

        lb_policy_name = 'lb-policy'
        self.conn.create_lb_cookie_stickiness_policy(1000, self.name,
                                                     lb_policy_name)
        self.conn.set_lb_policies_of_listener(self.name, self.listeners[0][0],
                                              lb_policy_name)

        # Try to remove the policy by passing empty list.
        # http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_SetLoadBalancerPoliciesOfListener.html
        # documents this as the way to remove policies.
        self.conn.set_lb_policies_of_listener(self.name, self.listeners[0][0],
                                              [])

    def test_can_make_sigv4_call(self):
        connection = boto.ec2.elb.connect_to_region('eu-central-1')
        lbs = connection.get_all_load_balancers()
        self.assertTrue(isinstance(lbs, list))
import boto.ec2
import os
from boto.ec2.elb import ELBConnection
from boto.ec2.elb import HealthCheck

from boto.ec2.autoscale import AutoScaleConnection
from boto.ec2.autoscale import LaunchConfiguration
from boto.ec2.autoscale import AutoScalingGroup
from boto.ec2.autoscale import ScalingPolicy

region = 'eu-west-1'
# SECURITY: never hard-code AWS credentials in source. A previous revision
# embedded a real key pair here; it must be treated as compromised and
# rotated. Credentials are now read from the environment; boto also falls
# back to its own config files when these are None.
AWS_ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY')
AWS_SECRET_KEY = os.environ.get('AWS_SECRET_KEY')

conn_elb = ELBConnection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
conn_as = AutoScaleConnection(AWS_ACCESS_KEY, AWS_SECRET_KEY)

# Load-balancer parameters consumed by the HealthCheck/create calls below.
elastic_load_balancer = {
    'name': 'load-balancer',
    'health_check_target': 'HTTP:80/index.html',  # location polled for health checks
    'connection_forwarding': [(80, 80, 'http'), (443, 443, 'tcp')]
}

autoscaling_group = {
    'name': 'web-server-auto-scaling-group',
    'min_size': 2,  # minimum instances running at all times
    'max_size': 4,  # hard cap on concurrently running instances

}
as_ami = {
Example #39
0
    def test_create_load_balancer_listeners_with_policies(self):
        """Create a dedicated balancer and attach cookie stickiness policies."""
        conn = ELBConnection()
        name = 'elb-boto-unit-test-policy'
        zones = ['us-east-1a']
        listeners = [(80, 8000, 'HTTP')]
        balancer = conn.create_load_balancer(name, zones, listeners)

        extra_listeners = [(443, 8001, 'HTTP')]
        conn.create_load_balancer_listeners(name, extra_listeners)

        # LB-generated cookie stickiness on the original listener.
        lb_policy = 'lb-policy'
        conn.create_lb_cookie_stickiness_policy(1000, name, lb_policy)
        conn.set_lb_policies_of_listener(name, listeners[0][0], lb_policy)

        # Application-generated cookie stickiness on the new listener.
        app_policy = 'app-policy'
        conn.create_app_cookie_stickiness_policy('appcookie', name, app_policy)
        conn.set_lb_policies_of_listener(name, extra_listeners[0][0], app_policy)

        found = conn.get_all_load_balancers()
        self.assertEqual([lb.name for lb in found], [name])
        self.assertEqual(
            sorted([listener.get_tuple() for listener in found[0].listeners]),
            sorted(listeners + extra_listeners))
Example #40
0
 def tearDown(self):
     """Delete every load balancer on the account after each test."""
     for balancer in ELBConnection().get_all_load_balancers():
         balancer.delete()
class BotoBalanceInterface(BalanceInterface):
    """Boto-backed BalanceInterface for ELB operations (Eucalyptus or AWS)."""

    # Lazily-created ELBConnection (built in __init__).
    conn = None
    # When True, live responses are also dumped to mockdata/*.json files.
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        """Connect to *clc_host* (Eucalyptus CLC or AWS ELB endpoint)."""
        #boto.set_stream_logger('foo')
        path = '/services/elb'
        port = 8773
        if clc_host[len(clc_host) - 13:] == 'amazonaws.com':
            clc_host = clc_host.replace('ec2', 'elasticloadbalancing', 1)
            path = '/'
            # NOTE(review): this reg = None is unconditionally overwritten
            # below, so AWS endpoints also get the 'eucalyptus' RegionInfo.
            # Looks like a bug -- the sibling implementation in this file
            # keeps RegionInfo(endpoint=...) for amazonaws.com hosts; confirm.
            reg = None
            port = 443
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        # NOTE(review): lexicographic version comparison is fragile
        # ('2.10' < '2.6' as strings) -- verify the intended cutoff.
        if boto.__version__ < '2.6':
            # Older boto has no validate_certs keyword.
            self.conn = ELBConnection(access_id,
                                      secret_key,
                                      region=reg,
                                      port=port,
                                      path=path,
                                      is_secure=True,
                                      security_token=token,
                                      debug=0)
        else:
            self.conn = ELBConnection(access_id,
                                      secret_key,
                                      region=reg,
                                      port=port,
                                      path=path,
                                      validate_certs=False,
                                      is_secure=True,
                                      security_token=token,
                                      debug=0)
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        """Serialize *obj* to file *name* as JSON (mock-data capture)."""
        f = open(name, 'w')
        json.dump(obj, f, cls=BotoJsonBalanceEncoder, indent=2)
        f.close()

    def create_load_balancer(self,
                             name,
                             zones,
                             listeners,
                             subnets=None,
                             security_groups=None,
                             scheme='internet-facing'):
        """Create a load balancer; thin pass-through to boto."""
        return self.conn.create_load_balancer(name, zones, listeners, subnets,
                                              security_groups, scheme)

    def delete_load_balancer(self, name):
        """Delete the named load balancer."""
        return self.conn.delete_load_balancer(name)

    def get_all_load_balancers(self, load_balancer_names=None):
        # NOTE(review): the early return makes everything below unreachable;
        # presumably a temporary stub or debug leftover -- confirm intent
        # before removing either the return or the dead code.
        return []
        obj = self.conn.get_all_load_balancers(load_balancer_names)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/ELB_Balancers.json")
        return obj

    def deregister_instances(self, load_balancer_name, instances):
        """Remove *instances* from the named balancer."""
        return self.conn.deregister_instances(load_balancer_name, instances)

    def register_instances(self, load_balancer_name, instances):
        """Add *instances* to the named balancer."""
        return self.conn.register_instances(load_balancer_name, instances)

    def create_load_balancer_listeners(self, name, listeners):
        """Attach *listeners* to the named balancer."""
        return self.conn.create_load_balancer_listeners(name, listeners)

    def delete_load_balancer_listeners(self, name, ports):
        """Remove the listeners on *ports* from the named balancer."""
        return self.conn.delete_load_balancer_listeners(name, ports)

    def configure_health_check(self, name, health_check):
        """Install *health_check* on the named balancer."""
        return self.conn.configure_health_check(name, health_check)

    def describe_instance_health(self, load_balancer_name, instances=None):
        """Report per-instance health, optionally capturing mock data."""
        obj = self.conn.describe_instance_health(load_balancer_name, instances)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/ELB_Instances.json")
        return obj
Example #42
0
 def test_elb_expiration(self):
     """Expired-credential 403s are detected; other errors are not."""
     conn = ELBConnection(aws_access_key_id='aws_access_key_id',
                          aws_secret_access_key='aws_secret_access_key')
     self.assert_is_expired(conn, GENERAL_EXPIRED, status=403)
     self.assert_is_not_expired(conn, GENERIC_BAD_REQUEST, status=403)
     self.assert_is_not_expired(conn, GENERIC_BAD_REQUEST, status=400)
Example #43
0
class EC2System(WrapanapiAPIBaseVM):
    """EC2 Management System, powered by boto

    Wraps the EC2 API and mimics the behavior of other implementors of
    MgmtServiceAPIBase for us in VM control testing

    Instead of username and password, accepts access_key_id and
    secret_access_key, the AWS analogs to those ideas. These are passed, along
    with any kwargs, straight through to boto's EC2 connection factory. This
    allows customization of the EC2 connection, to connect to another region,
    for example.

    For the purposes of the EC2 system, a VM's instance ID is its name because
    EC2 instances don't have to have unique names.

    Args:
        *kwargs: Arguments to connect, usually, username, password, region.
    Returns: A :py:class:`EC2System` object.
    """

    # Lazily evaluated counters consumed by the generic stats interface.
    _stats_available = {
        'num_vm': lambda self: len(self.list_vm()),
        'num_template': lambda self: len(self.list_template()),
    }

    # Maps this wrapper's canonical power states to EC2 state strings.
    # 'suspended' is empty because EC2 has no suspend operation.
    states = {
        'running': ('running', ),
        'stopped': ('stopped', 'terminated'),
        'suspended': (),
        'deleted': ('terminated', ),
    }

    can_suspend = False  # EC2 cannot suspend instances

    def __init__(self, **kwargs):
        """Build all the per-service AWS connections from **kwargs.

        Reads 'username', 'password' and 'region' from kwargs and creates
        boto2 connections (EC2, SQS, ELB, CloudFormation) plus boto3
        clients/resources (S3, EC2, SNS) for the same region.
        """
        # NOTE(review): kwargs is passed as a single positional dict, not
        # unpacked with ** — confirm that is what the base class expects.
        super(EC2System, self).__init__(kwargs)
        username = kwargs.get('username')
        password = kwargs.get('password')

        regionname = kwargs.get('region')
        region = get_region(kwargs.get('region'))
        self.api = EC2Connection(username, password, region=region)
        self.sqs_connection = connection.SQSConnection(
            username,
            password,
            region=_regions(regionmodule=sqs, regionname=regionname))
        self.elb_connection = ELBConnection(username,
                                            password,
                                            region=_regions(
                                                regionmodule=elb,
                                                regionname=regionname))
        self.s3_connection = boto3.resource(
            's3',
            aws_access_key_id=username,
            aws_secret_access_key=password,
            region_name=regionname,
            config=Config(signature_version='s3v4'))
        # NOTE(review): the s3v4 signature config is reused for the EC2
        # client as well — presumably harmless, but confirm it is intended.
        self.ec2_connection = boto3.client(
            'ec2',
            aws_access_key_id=username,
            aws_secret_access_key=password,
            region_name=regionname,
            config=Config(signature_version='s3v4'))
        self.stackapi = CloudFormationConnection(
            username,
            password,
            region=_regions(regionmodule=cloudformation,
                            regionname=regionname))
        self.sns_connection = boto3.client('sns', region_name=regionname)
        # Kept so later calls (e.g. create_s3_bucket) can read the region.
        self.kwargs = kwargs

    def disconnect(self):
        """No-op: the EC2 API is stateless, so there is no session to close."""

    def info(self):
        """Version string: boto's user agent plus the EC2 API version in use."""
        return '{} {}'.format(boto.UserAgent, self.api.APIVersion)

    def list_vm(self, include_terminated=True):
        """Return the Name tag (or instance ID when untagged) of each instance.

        Args:
            include_terminated: when False, terminated instances are dropped
                from the result (the default keeps them).
        """
        instances = self._get_all_instances()
        if not include_terminated:
            instances = [vm for vm in instances if vm.state != 'terminated']
        return [vm.tags.get('Name', vm.id) for vm in instances]

    def list_template(self):
        """Names (falling back to IDs) of machine images owned by or shared with us."""
        machine_only = {'image-type': 'machine'}
        owned = self.api.get_all_images(owners=['self'], filters=machine_only)
        shared = self.api.get_all_images(executable_by=['self'],
                                         filters=machine_only)
        unique_images = list(set(owned) | set(shared))
        # Image names are optional on EC2; the ID always exists.
        return map(lambda image: image.name or image.id, unique_images)

    def list_flavor(self):
        """Flavor listing is not supported for EC2."""
        raise NotImplementedError(
            'This function is not supported on this platform.')

    def vm_status(self, instance_id):
        """Return the EC2 state string of the requested instance.

        Args:
            instance_id: ID (or Name tag) of the instance to inspect
        Returns: The instance state, e.g. 'running' or 'stopped'; see the
            EC2 InstanceStateType API reference for the full set.
        """
        return self._get_instance(instance_id).state

    def vm_type(self, instance_id):
        """Return the instance type (e.g. 'm1.medium', 'm3.medium').

        Args:
            instance_id: ID (or Name tag) of the instance to inspect
        """
        return self._get_instance(instance_id).instance_type

    def vm_creation_time(self, instance_id):
        """Launch time of the instance as a timezone-aware UTC datetime."""
        raw = self._get_instance(instance_id).launch_time
        # launch_time looks like '2014-08-13T22:09:40.000Z'; strptime cannot
        # interpret the trailing 'Z' as UTC, so attach the zone afterwards.
        parsed = datetime.strptime(raw, '%Y-%m-%dT%H:%M:%S.%fZ')
        return parsed.replace(tzinfo=pytz.UTC)

    def create_vm(self,
                  image_id,
                  min_count=1,
                  max_count=1,
                  instance_type='t1.micro',
                  vm_name=''):
        """
            Creates aws instances.
        TODO:
            Check whether instances were really created.
            Add additional arguments to be able to modify settings for instance creation.
        Args:
            image_id: ID of AMI
            min_count: Minimal count of instances - useful only if creating thousand of instances
            max_count: Maximal count of instances - defaults to 1
            instance_type: Type of instances, catalog of instance types is here:
                https://aws.amazon.com/ec2/instance-types/
                Defaults to 't1.micro' which is the least expensive instance type

            vm_name: Name of instances, can be blank

        Returns:
            List of created aws instances' IDs, or None on failure.
        """
        self.logger.info(
            " Creating instances[%d] with name %s,type %s and image ID: %s ",
            max_count, vm_name, instance_type, image_id)
        try:
            # boto3 call; the Name tag is applied at creation time via
            # TagSpecifications rather than tagged afterwards.
            result = self.ec2_connection.run_instances(
                ImageId=image_id,
                MinCount=min_count,
                MaxCount=max_count,
                InstanceType=instance_type,
                TagSpecifications=[
                    {
                        'ResourceType': 'instance',
                        'Tags': [
                            {
                                'Key': 'Name',
                                'Value': vm_name,
                            },
                        ]
                    },
                ])
            instances = result.get('Instances')
            instance_ids = []
            for instance in instances:
                instance_ids.append(instance.get('InstanceId'))
            return instance_ids
        except Exception:
            # Best-effort: failures are logged and reported as None
            # (not an empty list) — callers must check for None.
            self.logger.exception(
                "Create of {} instance failed.".format(vm_name))
            return None

    def delete_vm(self, instance_id):
        """Terminate an instance and wait for it to reach a deleted state.

        Args:
            instance_id: ID (or Name tag) of the instance to act on
        Returns: True when termination completed before the timeout,
            False otherwise.
        """
        self.logger.info(" Terminating EC2 instance %s" % instance_id)
        instance_id = self._get_instance_id_by_name(instance_id)
        try:
            self.api.terminate_instances([instance_id])
            self._block_until(instance_id, self.states['deleted'])
        except ActionTimedOutError:
            return False
        return True

    def describe_stack(self, stack_name):
        """Return CloudFormation stack descriptions for *stack_name*.

        Args:
            stack_name: Unique name of stack
        Returns: The descriptions as a plain list.
        """
        return list(self.stackapi.describe_stacks(stack_name))

    def stack_exist(self, stack_name):
        """Whether a stack named exactly *stack_name* exists.

        Returns: Always a bool. (The previous version implicitly returned
            None on a miss; None and False are both falsy, so callers are
            unaffected.)
        """
        matches = [
            stack for stack in self.describe_stack(stack_name)
            if stack.stack_name == stack_name
        ]
        return bool(matches)

    def delete_stack(self, stack_name):
        """Delete a CloudFormation stack.

        Args:
            stack_name: Unique name of stack
        Returns: True when the delete call went through, False on timeout.
        """
        self.logger.info(" Terminating EC2 stack {}".format(stack_name))
        try:
            self.stackapi.delete_stack(stack_name)
        except ActionTimedOutError:
            return False
        return True

    def start_vm(self, instance_id):
        """Start an instance and wait for it to report 'running'.

        Args:
            instance_id: ID (or Name tag) of the instance to act on
        Returns: True when the instance reached 'running' in time.
        """
        self.logger.info(" Starting EC2 instance %s" % instance_id)
        instance_id = self._get_instance_id_by_name(instance_id)
        try:
            self.api.start_instances([instance_id])
            self._block_until(instance_id, self.states['running'])
        except ActionTimedOutError:
            return False
        return True

    def stop_vm(self, instance_id):
        """Stop an instance and wait (up to 360s) for a stopped state.

        Args:
            instance_id: ID (or Name tag) of the instance to act on
        Returns: True when the instance stopped in time.
        """
        self.logger.info(" Stopping EC2 instance %s" % instance_id)
        instance_id = self._get_instance_id_by_name(instance_id)
        try:
            self.api.stop_instances([instance_id])
            self._block_until(instance_id, self.states['stopped'], timeout=360)
        except ActionTimedOutError:
            return False
        return True

    def restart_vm(self, instance_id):
        """Restart an instance by stopping it, then starting it again.

        Args:
            instance_id: ID (or Name tag) of the instance to act on
        Returns: True only when both phases succeed; False can mean either
            the stop or the start failed.

        Note: EC2 offers reboot_instances, but separate stop/start calls
            give better state visibility — there is no 'rebooting' state
            that could be monitored.
        """
        self.logger.info(" Restarting EC2 instance %s" % instance_id)
        stopped = self.stop_vm(instance_id)
        return stopped and self.start_vm(instance_id)

    def is_vm_state(self, instance_id, state):
        """Whether the instance's current status is one of *state*."""
        current = self.vm_status(instance_id)
        return current in state

    def is_vm_running(self, instance_id):
        """Is the VM running?

        Args:
            instance_id: ID of the instance to inspect
        Returns: Whether the instance is in a running state; any lookup
            failure (e.g. instance not found) is reported as False.
        """
        try:
            return self.vm_status(instance_id) in self.states['running']
        except Exception:
            # Was a bare ``except:``; narrowed so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            return False

    def wait_vm_running(self, instance_id, num_sec=360):
        """Block until the instance reports running, up to *num_sec* seconds."""
        message = (" Waiting for EC2 instance %s to change status to running" %
                   instance_id)
        self.logger.info(message)
        wait_for(self.is_vm_running, [instance_id], num_sec=num_sec)

    def is_vm_stopped(self, instance_id):
        """Whether the instance is stopped (or terminated).

        Args:
            instance_id: ID of the instance to inspect
        """
        status = self.vm_status(instance_id)
        return status in self.states['stopped']

    def wait_vm_stopped(self, instance_id, num_sec=360):
        """Block until the instance stops or terminates, up to *num_sec* seconds."""
        message = (
            " Waiting for EC2 instance %s to change status to stopped or terminated"
            % instance_id)
        self.logger.info(message)
        wait_for(self.is_vm_stopped, [instance_id], num_sec=num_sec)

    def suspend_vm(self, instance_id):
        """EC2 cannot suspend instances.

        Raises:
            ActionNotSupported: always.
        """
        raise ActionNotSupported()

    def is_vm_suspended(self, instance_id):
        """Unanswerable: EC2 has no suspended state.

        Raises:
            ActionNotSupported: always.
        """
        raise ActionNotSupported()

    def wait_vm_suspended(self, instance_id, num_sec):
        """Would wait forever — EC2 never suspends.

        Raises:
            ActionNotSupported: always.
        """
        raise ActionNotSupported()

    def clone_vm(self, source_name, vm_name):
        """Cloning is not implemented for EC2."""
        raise NotImplementedError(
            'This function has not yet been implemented.')

    def deploy_template(self, template, *args, **kwargs):
        """Instantiate the requested template image (ami id)

        Accepts args/kwargs from boto's
        :py:meth:`run_instances<boto:boto.ec2.connection.EC2Connection.run_instances>` method

        Most important args are listed below.

        Args:
            template: Template name (AMI ID) to instantiate
            vm_name: Name of the instance (Name tag to set)
            instance_type: Type (flavor) of the instance

        Returns: Instance ID of the created instance

        Note: min_count and max_count args will be forced to '1'; if you're trying to do
              anything fancier than that, you might be in the wrong place

        """
        # Enforce create_vm only creating one VM
        self.logger.info(" Deploying EC2 template %s" % template)

        # strip out kwargs that ec2 doesn't understand; the rest are passed
        # through to boto's run_instances untouched
        timeout = kwargs.pop('timeout', 900)
        vm_name = kwargs.pop('vm_name', None)
        power_on = kwargs.pop('power_on', True)

        # Make sure we only provision one VM
        kwargs.update({'min_count': 1, 'max_count': 1})

        # sanity-check inputs
        if 'instance_type' not in kwargs:
            kwargs['instance_type'] = 'm1.small'
        if not template.startswith('ami'):
            # assume this is a lookup by name, get the ami id
            template = self._get_ami_id_by_name(template)

        # clone!
        reservation = self.api.run_instances(template, *args, **kwargs)
        instances = self._get_instances_from_reservations([reservation])
        # Should have only made one VM; return its ID for use in other methods
        self.wait_vm_running(instances[0].id, num_sec=timeout)

        if vm_name:
            self.set_name(instances[0].id, vm_name)
        # NOTE(review): the instance should already be running after the
        # wait above; this start_vm looks like a belt-and-braces safeguard —
        # confirm it is intended.
        if power_on:
            self.start_vm(instances[0].id)
        return instances[0].id

    def set_name(self, instance_id, new_name):
        """Set the 'Name' tag on the instance and return the new name."""
        self.logger.info("Setting name of EC2 instance %s to %s" %
                         (instance_id, new_name))
        self._get_instance(instance_id).add_tag('Name', new_name)
        return new_name

    def get_name(self, instance_id):
        """The instance's 'Name' tag, falling back to the given ID."""
        tags = self._get_instance(instance_id).tags
        return tags.get('Name', instance_id)

    def _get_instance(self, instance_id):
        """Resolve an ID or Name tag to a single boto Instance object.

        Returns: The matching instance, or None when nothing matched.
        Raises:
            MultipleInstancesError: the lookup matched more than one instance.
        """
        instance_id = self._get_instance_id_by_name(instance_id)
        reservations = self.api.get_all_instances([instance_id])
        instances = self._get_instances_from_reservations(reservations)
        if len(instances) > 1:
            raise MultipleInstancesError

        try:
            return instances[0]
        except IndexError:
            # Bug fix: indexing an empty list raises IndexError, not
            # KeyError, so the old handler never fired and an empty result
            # escaped to callers instead of returning None.
            return None

    def current_ip_address(self, instance_id):
        """The instance's public IP address as a string."""
        instance = self._get_instance(instance_id)
        return str(instance.ip_address)

    def get_ip_address(self, instance_id, **kwargs):
        """Alias for :py:meth:`current_ip_address`; extra kwargs are ignored."""
        return self.current_ip_address(instance_id)

    def _get_instance_id_by_name(self, instance_name):
        """Resolve an instance Name tag to its instance ID.

        If *instance_name* already looks like an instance ID it is returned
        unchanged.

        Raises:
            VMInstanceNotFound: no instance carries that Name tag.
            MultipleInstancesError: the name matches more than one instance.
        """
        # Quick validation that the instance name isn't actually an ID
        # If people start naming their instances in such a way to break this,
        # check, that would be silly, but we can upgrade to regex if necessary.
        # Fix: raw string — '\w' in a plain literal is an invalid escape
        # sequence (DeprecationWarning/SyntaxWarning on modern Pythons).
        pattern = re.compile(r'^i-\w{8,17}$')
        if pattern.match(instance_name):
            return instance_name

        # Filter by the 'Name' tag
        filters = {
            'tag:Name': instance_name,
        }
        reservations = self.api.get_all_instances(filters=filters)
        instances = self._get_instances_from_reservations(reservations)
        if not instances:
            raise VMInstanceNotFound(instance_name)
        elif len(instances) > 1:
            raise MultipleInstancesError('Instance name "%s" is not unique' %
                                         instance_name)

        # We have an instance! return its ID
        return instances[0].id

    def _get_ami_id_by_name(self, image_name):
        """Resolve an image name to its AMI ID.

        Raises:
            ImageNotFoundError: nothing matched the name.
            MultipleImagesError: the name is ambiguous.
        """
        candidates = self.api.get_all_images(filters={'name': image_name})
        if not candidates:
            raise ImageNotFoundError(image_name)
        if len(candidates) > 1:
            raise MultipleImagesError(
                'Template name %s returned more than one image_name. '
                'Use the ami-ID or remove duplicates from EC2' % image_name)
        return candidates[0].id

    def does_vm_exist(self, name):
        """True if *name* resolves to at least one instance (even ambiguously)."""
        try:
            self._get_instance_id_by_name(name)
        except MultipleInstancesError:
            # Several matches still means "it exists".
            return True
        except VMInstanceNotFound:
            return False
        return True

    def _get_instances_from_reservations(self, reservations):
        """Flatten a sequence of reservations into a list of their instances."""
        return [
            instance for reservation in reservations
            for instance in reservation.instances
        ]

    def _get_all_instances(self):
        """Every instance visible to this EC2 connection."""
        reservations = self.api.get_all_instances()
        return self._get_instances_from_reservations(reservations)

    def _block_until(self, instance_id, expected, timeout=90):
        """Poll until the instance reaches one of the *expected* states.

        Args:
            timeout: seconds to wait before wait_for gives up (default 90).
        """
        def _reached_state():
            return self.vm_status(instance_id) in expected

        wait_for(_reached_state, num_sec=timeout)

    def remove_host_from_cluster(self, hostname):
        """Host/cluster management does not apply to EC2."""
        raise NotImplementedError('remove_host_from_cluster not implemented')

    def create_s3_bucket(self, bucket_name):
        """Create an S3 bucket in the configured region; True on success."""
        self.logger.info("Creating bucket: {}".format(bucket_name))
        location = {'LocationConstraint': self.kwargs.get('region')}
        try:
            self.s3_connection.create_bucket(Bucket=bucket_name,
                                             CreateBucketConfiguration=location)
        except Exception:
            self.logger.exception(
                "Error: Bucket was not successfully created.")
            return False
        self.logger.info("Success: Bucket was successfully created.")
        return True

    def upload_file_to_s3_bucket(self, bucket_name, file_path, file_name):
        """Upload the local *file_path* into *bucket_name* under key *file_name*.

        Returns: True on success, False when the file is missing or the
            upload raised.
        """
        bucket = self.s3_connection.Bucket(bucket_name)
        self.logger.info("uploading file {} to bucket: {}".format(
            file_path, bucket_name))
        if not os.path.isfile(file_path):
            self.logger.error("Error: File to upload does not exist.")
            return False
        try:
            bucket.upload_file(file_path, file_name)
        except Exception:
            self.logger.exception("File upload failed.")
            return False
        self.logger.info("Success: uploading file completed")
        return True

    def object_exists_in_bucket(self, bucket_name, object_key):
        """True when the bucket holds an object whose key equals *object_key*."""
        bucket = self.s3_connection.Bucket(name=bucket_name)
        # Generator short-circuits on the first matching key.
        return any(obj.key == object_key for obj in bucket.objects.all())

    def delete_s3_bucket(self, bucket_name):
        """Delete an S3 bucket; True on success.

        TODO: Force delete - delete all objects and then bucket
        """
        self.logger.info("Trying to delete bucket {}".format(bucket_name))
        try:
            self.s3_connection.Bucket(bucket_name).delete()
        except Exception:
            self.logger.exception(
                "Bucket {} deletion failed".format(bucket_name))
            return False
        self.logger.info(
            "Success: bucket {} was deleted.".format(bucket_name))
        return True

    def delete_objects_from_s3_bucket(self, bucket_name, object_keys):
        """Delete each of the given object_keys from the given bucket"""
        if not isinstance(object_keys, list):
            raise ValueError(
                "object_keys argument must be a list of key strings")
        targets = [{'Key': key} for key in object_keys]
        bucket = self.s3_connection.Bucket(name=bucket_name)
        try:
            bucket.delete_objects(Delete={'Objects': targets})
        except Exception:
            self.logger.exception(
                'Deleting object keys {} from Bucket "{}" failed'.format(
                    object_keys, bucket_name))
            return False
        return True

    def get_all_disassociated_addresses(self):
        """Elastic IPs bound to neither an instance nor a network interface."""
        unused = []
        for address in self.api.get_all_addresses():
            if not (address.instance_id or address.network_interface_id):
                unused.append(address)
        return unused

    def release_vpc_address(self, alloc_id):
        """Release a VPC elastic IP by allocation id; True on success."""
        self.logger.info(" Releasing EC2 VPC EIP {}".format(str(alloc_id)))
        try:
            self.api.release_address(allocation_id=alloc_id)
        except ActionTimedOutError:
            return False
        return True

    def release_address(self, address):
        """Release an EC2-Classic elastic IP by public address; True on success."""
        self.logger.info(" Releasing EC2-CLASSIC EIP {}".format(address))
        try:
            self.api.release_address(public_ip=address)
        except ActionTimedOutError:
            return False
        return True

    def get_all_unattached_volumes(self):
        """EBS volumes whose attachment status is empty (i.e. not attached)."""
        unattached = []
        for volume in self.api.get_all_volumes():
            if not volume.attach_data.status:
                unattached.append(volume)
        return unattached

    def delete_sqs_queue(self, queue_name):
        """Delete the named SQS queue; True when it was found and deleted."""
        self.logger.info(" Deleting SQS queue {}".format(queue_name))
        try:
            queue = self.sqs_connection.get_queue(queue_name=queue_name)
            if not queue:
                return False
            self.sqs_connection.delete_queue(queue=queue)
            return True
        except ActionTimedOutError:
            return False

    def get_all_unused_loadbalancers(self):
        """ELBs that currently have no registered instances."""
        balancers = self.elb_connection.get_all_load_balancers()
        return [elb for elb in balancers if not elb.instances]

    def delete_loadbalancer(self, loadbalancer):
        """Delete the given ELB object (by its name); True on success."""
        self.logger.info(" Deleting Elastic Load Balancer {}".format(
            loadbalancer.name))
        try:
            self.elb_connection.delete_load_balancer(loadbalancer.name)
        except ActionTimedOutError:
            return False
        return True

    def get_all_unused_network_interfaces(self):
        """ENIs in the 'available' (unattached) state."""
        interfaces = self.api.get_all_network_interfaces()
        return [eni for eni in interfaces if eni.status == "available"]

    def import_image(self, s3bucket, s3key, format="vhd", description=None):
        """Start an EC2 import-image task from a disk image stored in S3.

        Args:
            s3bucket: bucket containing the disk image
            s3key: key of the disk image object
            format: disk image format, default "vhd" (NOTE: shadows the
                builtin ``format``; kept for interface compatibility)
            description: optional task description; defaults to *s3key*

        Returns:
            The ImportTaskId string, or False when the request failed.
        """
        self.logger.info(
            " Importing image %s from %s bucket with description %s in %s started "
            "successfully.", s3key, s3bucket, description, format)
        try:
            result = self.ec2_connection.import_image(DiskContainers=[{
                'Description':
                description if description is not None else s3key,
                'Format':
                format,
                'UserBucket': {
                    'S3Bucket': s3bucket,
                    'S3Key': s3key
                }
            }])
            task_id = result.get("ImportTaskId")
            return task_id

        except Exception:
            self.logger.exception("Import of {} image failed.".format(s3key))
            return False

    def get_import_image_task(self, task_id):
        """Return the first ImportImageTasks entry for *task_id*."""
        response = self.ec2_connection.describe_import_image_tasks(
            ImportTaskIds=[task_id])
        return response.get("ImportImageTasks")[0]

    def get_image_id_if_import_completed(self, task_id):
        """The imported AMI's ImageId once the task is 'completed', else False."""
        task = self.get_import_image_task(task_id)
        if task.get("Status") == 'completed':
            return task.get("ImageId")
        return False

    def copy_image(self, source_region, source_image, image_id):
        """Copy an AMI from *source_region* into this region; True on success."""
        self.logger.info(
            " Copying image %s from region %s to region %s with image id %s",
            source_image, source_region, self.kwargs.get('region'), image_id)
        try:
            self.ec2_connection.copy_image(SourceRegion=source_region,
                                           SourceImageId=source_image,
                                           Name=image_id)
        except Exception:
            self.logger.exception(
                "Copy of {} image failed.".format(source_image))
            return False
        return True

    def deregister_image(self, image_id, delete_snapshot=True):
        """Deregister the given AMI ID, only valid for self owned AMI's"""
        images = self.api.get_all_images(owners=['self'],
                                         filters={'image-type': 'machine'})
        try:
            for image in images:
                if image.id == image_id:
                    image.deregister(delete_snapshot=delete_snapshot)
        except Exception:
            self.logger.exception(
                'Deregister of image_id {} failed'.format(image_id))
            return False
        return True

    def list_topics(self):
        """Raw SNS ListTopics response for the configured region."""
        response = self.sns_connection.list_topics()
        return response

    def get_arn_if_topic_exists(self, topic_name):
        """ARN of the SNS topic named *topic_name*, or False when absent.

        The SNS API exposes no separate name field, so the name is read from
        the last ':'-separated segment of each ARN
        (arn:aws:sns:region:ACCOUNT_NUM:topic_name).
        """
        for entry in self.list_topics().get('Topics'):
            arn = entry.get('TopicArn')
            if arn.split(':')[-1] == topic_name:
                return arn
        return False

    def delete_topic(self, arn):
        """Delete the SNS topic with the given ARN; True on success."""
        self.logger.info(" Deleting SNS Topic {} ".format(arn))
        try:
            self.sns_connection.delete_topic(TopicArn=arn)
        except Exception:
            self.logger.exception("Delete of {} topic failed.".format(arn))
            return False
        return True

    def volume_exists_and_available(self, volume_name=None, volume_id=None):
        """True when the volume exists and its status is 'available'.

        Args:
            volume_name: Name tag of the volume; used when volume_id is unset
            volume_id: ID in the form vol-<random chars>; takes precedence

        Returns:
            bool — existence and availability combined.
        Raises:
            TypeError: neither argument was supplied.
        """
        available_filter = {'Name': 'status', 'Values': ['available']}
        if volume_id:
            try:
                found = self.ec2_connection.describe_volumes(
                    VolumeIds=[volume_id], Filters=[available_filter])
                return bool(found.get('Volumes'))
            except Exception:
                # describe_volumes raises for an unknown volume id.
                return False
        if volume_name:
            found = self.ec2_connection.describe_volumes(
                Filters=[available_filter,
                         {'Name': 'tag:Name', 'Values': [volume_name]}])
            return bool(found.get('Volumes'))
        raise TypeError(
            "Neither volume_name nor volume_id were specified.")

    def snapshot_exists(self, snapshot_name=None, snapshot_id=None):
        """True when a snapshot matching the given id or Name tag exists.

        Args:
            snapshot_name: Name tag of the snapshot; used when snapshot_id
                is unset
            snapshot_id: ID in the form snap-<random chars>; takes precedence

        Raises:
            TypeError: neither argument was supplied.
        """
        if snapshot_id:
            try:
                found = self.ec2_connection.describe_snapshots(
                    SnapshotIds=[snapshot_id])
                return bool(found.get('Snapshots'))
            except Exception:
                # describe_snapshots raises for an unknown snapshot id.
                return False
        if snapshot_name:
            found = self.ec2_connection.describe_snapshots(
                Filters=[{'Name': 'tag:Name', 'Values': [snapshot_name]}])
            return bool(found.get('Snapshots'))
        raise TypeError(
            "Neither snapshot_name nor snapshot_id were specified.")

    def copy_snapshot(self, source_snapshot_id, source_region=None):
        """Kick off a snapshot copy; True when the request was accepted.

        This method is not working properly because of bug in boto3:
        it creates a new snapshot with empty size and error.

        Args:
            source_snapshot_id: Id of source snapshot in format snap-random_chars
            source_region: Source region, if not set then ec2_connection region
        """
        region = source_region or self.kwargs.get('region')
        try:
            self.ec2_connection.copy_snapshot(
                SourceRegion=region,
                SourceSnapshotId=source_snapshot_id,
                DestinationRegion=region)
        except Exception:
            self.logger.exception(
                "Copy snapshot with id {} failed.".format(source_snapshot_id))
            return False
        return True
# Example #44
# 0
class EC2System(System, VmMixin, TemplateMixin, StackMixin):
    """EC2 Management System, powered by boto

    Wraps the EC2 API

    Instead of username and password, accepts access_key_id and
    secret_access_key, the AWS analogs to those ideas. These are passed, along
    with any kwargs, straight through to boto's EC2 connection factory. This
    allows customization of the EC2 connection, to connect to another region,
    for example.

    For the purposes of the EC2 system, a VM's instance ID is its name because
    EC2 instances don't have to have unique names.

    Args:
        *kwargs: Arguments to connect, usually, username, password, region.
    Returns: A :py:class:`EC2System` object.
    """

    # Lazily-evaluated stat collectors, keyed by stat name.
    _stats_available = {
        'num_vm': lambda self: len(self.list_vms(hide_deleted=False)),
        'num_template': lambda self: len(self.list_templates()),
    }

    # NOTE(review): these class attributes are shadowed by the identically
    # named read-only properties defined further down; both report False.
    can_suspend = False
    can_pause = False
    def __init__(self, **kwargs):
        """
        Set up connections to all AWS services this wrapper uses.

        Args:
            **kwargs: forwarded to the parent System; 'username'/'password'
                are used as the AWS access key id / secret access key and
                'region' selects the AWS region for every connection.
        """
        super(EC2System, self).__init__(**kwargs)
        # AWS credentials: 'username' is the access key id, 'password' the
        # secret access key.
        self._username = kwargs.get('username')
        self._password = kwargs.get('password')
        # Shared boto3 config: retry up to 10 times.
        # NOTE(review): signature_version 's3v4' is applied to every boto3
        # client built below, not only S3 — confirm this is intentional.
        connection_config = Config(signature_version='s3v4',
                                   retries=dict(max_attempts=10))

        self._region_name = kwargs.get('region')
        self._region = get_region(self._region_name)
        # Legacy boto (boto2) EC2 connection, used by self.api callers.
        self.api = EC2Connection(self._username,
                                 self._password,
                                 region=self._region)

        self.sqs_connection = connection.SQSConnection(
            self._username,
            self._password,
            region=_regions(regionmodule=sqs, regionname=self._region_name))

        self.elb_connection = ELBConnection(self._username,
                                            self._password,
                                            region=_regions(
                                                regionmodule=elb,
                                                regionname=self._region_name))

        # boto3 resource/clients for the newer-style calls.
        self.s3_connection = boto3.resource(
            's3',
            aws_access_key_id=self._username,
            aws_secret_access_key=self._password,
            region_name=self._region_name,
            config=connection_config)

        self.ec2_connection = boto3.client(
            'ec2',
            aws_access_key_id=self._username,
            aws_secret_access_key=self._password,
            region_name=self._region_name,
            config=connection_config)

        self.cloudformation_connection = boto3.client(
            'cloudformation',
            aws_access_key_id=self._username,
            aws_secret_access_key=self._password,
            region_name=self._region_name,
            config=connection_config)

        # NOTE(review): SNS client is built without explicit credentials —
        # it relies on the ambient AWS credential chain; confirm intended.
        self.sns_connection = boto3.client('sns',
                                           region_name=self._region_name)

        self.kwargs = kwargs

    @property
    def _identifying_attrs(self):
        """Attributes that uniquely identify this connection."""
        return dict(username=self._username,
                    password=self._password,
                    region=self._region_name)

    @property
    def can_suspend(self):
        """Suspend is not supported on EC2; always False."""
        return False

    @property
    def can_pause(self):
        """Pause is not supported on EC2; always False."""
        return False

    def disconnect(self):
        """Do nothing: the AWS EC2 service is stateless, so there is no
        session to tear down."""

    def info(self):
        """Return the boto user agent and the EC2 API version in use."""
        return '{} {}'.format(boto.UserAgent, self.api.APIVersion)

    def _get_instances(self, **kwargs):
        """Fetch instance reservations and flatten them into EC2Instance
        wrappers."""
        return [
            EC2Instance(system=self, raw=instance)
            for reservation in self.api.get_all_instances(**kwargs)
            for instance in reservation.instances
        ]

    @staticmethod
    def _add_filter_for_terminated(kwargs_dict):
        """Add (in place) an instance-state filter that excludes DELETED
        states, and return the mutated dict."""
        live_states = [
            api_state
            for api_state, vm_state in EC2Instance.state_map.items()
            if vm_state is not VmState.DELETED
        ]
        kwargs_dict.setdefault('filters', {})['instance-state-name'] = live_states
        return kwargs_dict

    def find_vms(self, name=None, id=None, filters=None, hide_deleted=True):
        """
        Find instances on the EC2 system.

        Exactly one of ``name``, ``id`` or ``filters`` must be supplied.

        Args:
            name (str): name of instance (which is a tag)
            id (str): id of instance
            filters (dict): filters to pass along to system.api.get_all_instances()
            hide_deleted: do not list an instance if it has been terminated

        Returns:
            List of EC2Instance objects that match
        """
        provided = [arg for arg in (name, id, filters) if arg]
        if len(provided) != 1:
            raise ValueError(
                "You must select one of these search methods: name, id, or filters"
            )

        if id:
            kwargs = {'instance_ids': [id]}
        elif filters:
            kwargs = {'filters': filters}
        else:
            # A "name" that looks like an instance id is searched as an id.
            if re.match(r'^i-\w{8,17}$', name):
                kwargs = {'instance_ids': [name]}
            else:
                kwargs = {'filters': {'tag:Name': name}}

        if hide_deleted:
            self._add_filter_for_terminated(kwargs)

        return self._get_instances(**kwargs)

    def get_vm(self, name, hide_deleted=True):
        """
        Return the single EC2Instance whose name or id equals ``name``.

        Args:
            name: name or id of instance (must be unique)
        Returns:
            EC2Instance object
        Raises:
            VMInstanceNotFound: no instance exists with this name/id
            MultipleInstancesError: name is not unique
        """
        matches = self.find_vms(name=name, hide_deleted=hide_deleted)
        if len(matches) > 1:
            raise MultipleInstancesError('Instance name "%s" is not unique' %
                                         name)
        if not matches:
            raise VMInstanceNotFound(name)
        return matches[0]

    def list_vms(self, hide_deleted=True):
        """
        Return EC2Instance objects for instances on EC2.

        Args:
            hide_deleted: when True, exclude terminated instances
        """
        kwargs = {}
        if hide_deleted:
            self._add_filter_for_terminated(kwargs)
        # _get_instances already returns a fresh list; the previous identity
        # comprehension only produced a pointless extra copy.
        return self._get_instances(**kwargs)

    def create_vm(self,
                  image_id,
                  min_count=1,
                  max_count=1,
                  instance_type='t1.micro',
                  vm_name='',
                  **kwargs):
        """
        Creates aws instances.

        TODO:
            Check whether instances were really created.
            Add additional arguments to be able to modify settings for instance creation.
        Args:
            image_id: ID of AMI
            min_count: Minimal count of instances - useful only if creating thousand of instances
            max_count: Maximal count of instances - defaults to 1
            instance_type: Type of instances, catalog of instance types is here:
                https://aws.amazon.com/ec2/instance-types/
                Defaults to 't1.micro' which is the least expensive instance type

            vm_name: Name of instances, can be blank

        Returns:
            A single EC2Instance when exactly one instance was created,
            otherwise a list of EC2Instance objects for all instances created.
        """
        # Extra kwargs are accepted for interface compatibility but ignored.
        self.logger.debug("ec2.create_vm() -- Ignored kwargs: %s", kwargs)
        self.logger.info(
            "Creating instances[%d] with name %s,type %s and image ID: %s ",
            max_count, vm_name, instance_type, image_id)
        try:
            # Tag the instances with their display name at launch time.
            result = self.ec2_connection.run_instances(
                ImageId=image_id,
                MinCount=min_count,
                MaxCount=max_count,
                InstanceType=instance_type,
                TagSpecifications=[
                    {
                        'ResourceType': 'instance',
                        'Tags': [
                            {
                                'Key': 'Name',
                                'Value': vm_name,
                            },
                        ]
                    },
                ])
        except Exception:
            self.logger.exception("Create of instance '%s' failed.", vm_name)
            raise

        # run_instances returns a dict; pull the new instance ids out of it.
        try:
            instances_json = result['Instances']
            instance_ids = [entry['InstanceId'] for entry in instances_json]
        except KeyError:
            self.logger.exception(
                "Unable to parse all InstanceId's from response json")
            raise

        instances = [
            EC2Instance(system=self, uuid=uuid) for uuid in instance_ids
        ]
        # Block until every new instance settles before returning.
        for instance in instances:
            self.logger.info("Waiting for instance '%s' to reach steady state",
                             instance.uuid)
            instance.wait_for_steady_state()
        if len(instances) == 1:
            return instances[0]
        else:
            return instances

    def list_stacks(self, stack_status_filter=StackStates.ACTIVE):
        """
        Return CloudFormationStack objects whose status is in
        ``stack_status_filter`` (see ``StackStates``).
        """
        summaries = self.cloudformation_connection.list_stacks()['StackSummaries']
        return [
            CloudFormationStack(system=self, uuid=summary['StackId'])
            for summary in summaries
            if summary['StackStatus'] in stack_status_filter
        ]

    def find_stacks(self, name=None, id=None):
        """
        Return list of all stacks with given name or id

        According to boto3 docs, you can use name or ID in these situations:

        "Running stacks: You can specify either the stack's name or its unique stack ID.
        Deleted stacks: You must specify the unique stack ID."

        If 'name' kwarg is given and we fail to locate the stack initially, we will retry with
        'list_stacks' to get the list of all stacks with this name (even if they are deleted)

        If 'id' kwarg is given and we hit an error finding it, we don't call list_stacks. This
        is the more efficient kwarg to use if you are searching specifically by id.

        Args:
            name: name to search for
            id: id to search for
        Returns:
            List of CloudFormationStack objects
        """
        if not name and not id:
            raise ValueError('missing one of required kwargs: name, id')

        # Remember which kwarg was used: only a name search falls back to
        # scanning list_stacks() (which also includes deleted stacks).
        if name:
            searching_by_name = True
            name_or_id = name
        elif id:
            searching_by_name = False
            name_or_id = id

        stack_list = []
        try:
            # Try to find by name/id directly by using describe_stacks
            stack_list = [
                CloudFormationStack(system=self,
                                    uuid=stack['StackId'],
                                    raw=stack)
                for stack in self.cloudformation_connection.describe_stacks(
                    StackName=name_or_id)['Stacks']
            ]
        except ClientError as error:
            # Stack not found, if searching by name, look through deleted stacks...
            # The "not found" case is detected by matching the error text;
            # any other ClientError message results in an empty list.
            if searching_by_name and 'Stack with id {} does not exist'.format(
                    name) in str(error):
                stack_list = [
                    CloudFormationStack(system=self,
                                        uuid=stack_summary['StackId'])
                    for stack_summary in self.cloudformation_connection.
                    list_stacks()['StackSummaries']
                    if stack_summary['StackName'] == name
                ]
        return stack_list

    def get_stack(self, name):
        """
        Return the single stack with the given unique name or id.

        Args:
            name: unique name or id of the stack
        Returns:
            CloudFormationStack object
        Raises:
            NotFoundError: no stack matched
            MultipleItemsError: the name is ambiguous
        """
        matches = self.find_stacks(name)
        if len(matches) > 1:
            raise MultipleItemsError(
                "Multiple stacks with name {} found".format(name))
        if not matches:
            raise NotFoundError("Stack with name {} not found".format(name))
        return matches[0]

    def list_templates(self,
                       executable_by_me=True,
                       owned_only_by_me=False,
                       public=False):
        """
        List EC2 images of image-type 'machine'.

        Args:
            executable_by_me: search images executable by me (default True)
            owned_only_by_me: search images owned only by me (default False)
            public: search public images (default False)
        """
        machine_only = {'image-type': 'machine'}

        # Pick the scoping arguments first, then issue a single API call.
        if public:
            scope = {}
        elif executable_by_me:
            scope = {'executable_by': ['self']}
        elif owned_only_by_me:
            scope = {'owners': ['self']}
        else:
            raise ValueError(
                "One of the following must be 'True': owned_by_me, executable_by_me, public"
            )

        raw_images = self.api.get_all_images(filters=machine_only, **scope)
        return [EC2Image(system=self, raw=img) for img in raw_images]

    def find_templates(self,
                       name=None,
                       id=None,
                       executable_by_me=True,
                       owned_only_by_me=False,
                       public=False,
                       filters=None):
        """
        Find image on ec2 system

        Supported queries include searching by name, id, or passing
        in a specific filters dict to the system API. You can only
        select one of these methods.

        Args:
            name (str): name of image
            id (str): id of image
            executable_by_me: search images executable by me (default True)
            owned_only_by_me: search images owned only by me (default False)
            public: search public images (default False)
            filters (dict): optional filters to pass along to system.api.get_all_images()

        Returns:
            List of EC2Image objects that match
        """
        # Validate args: exactly one of name/id/filters may be supplied.
        filled_args = [arg for arg in (
            name,
            id,
            filters,
        ) if arg]
        if not filled_args or len(filled_args) > 1:
            raise ValueError(
                "You must select one of these search methods: name, id, or filters"
            )

        if id:
            kwargs = {'image_ids': [id]}
        elif filters:
            kwargs = {'filters': filters}
        elif name:
            # Quick validation that the image name isn't actually an ID
            if name.startswith('ami-'):
                # Switch to using the id search method
                kwargs = {'image_ids': [name]}
            else:
                kwargs = {'filters': {'name': name}}

        # Apply the ownership/visibility scope on top of the search kwargs.
        if public:
            images = self.api.get_all_images(**kwargs)
        elif executable_by_me:
            images = self.api.get_all_images(executable_by=['self'], **kwargs)
        elif owned_only_by_me:
            images = self.api.get_all_images(owners=['self'], **kwargs)
        else:
            raise ValueError(
                "One of the following must be 'True': owned_by_me, executable_by_me, public"
            )

        return [EC2Image(system=self, raw=image) for image in images]

    def get_template(self, name_or_id):
        """Return the unique EC2Image matching ``name_or_id``.

        Raises:
            ImageNotFoundError: nothing matched
            MultipleImagesError: more than one image matched
        """
        found = self.find_templates(name=name_or_id)
        if len(found) > 1:
            raise MultipleImagesError(
                'Image name {} returned more than one image '
                'Use the ami-ID or remove duplicates from EC2'.format(
                    name_or_id))
        if not found:
            raise ImageNotFoundError(
                'Unable to find image {}'.format(name_or_id))
        return found[0]

    def create_template(self, *args, **kwargs):
        """Creating templates is not supported by this wrapper."""
        raise NotImplementedError

    # TODO: Move everything below here into the entity/class-based structure

    def create_s3_bucket(self, bucket_name):
        """Create an S3 bucket in this connection's region.

        Returns:
            True on success, False on any failure (which is logged).
        """
        self.logger.info("Creating bucket: '%s'", bucket_name)
        location = {'LocationConstraint': self.kwargs.get('region')}
        try:
            self.s3_connection.create_bucket(
                Bucket=bucket_name, CreateBucketConfiguration=location)
            self.logger.info("Success: Bucket was successfully created.")
            return True
        except Exception:
            self.logger.exception(
                "Error: Bucket was not successfully created.")
            return False

    def upload_file_to_s3_bucket(self, bucket_name, file_path, file_name):
        """Upload local file ``file_path`` into ``bucket_name`` under key
        ``file_name``.

        Returns:
            True on success, False when the file is missing or upload fails.
        """
        bucket = self.s3_connection.Bucket(bucket_name)
        self.logger.info("uploading file '%s' to bucket: '%s'", file_path,
                         bucket_name)
        # Guard clause: nothing to do if the local file does not exist.
        if not os.path.isfile(file_path):
            self.logger.error("Error: File to upload does not exist.")
            return False
        try:
            bucket.upload_file(file_path, file_name)
            self.logger.info("Success: uploading file completed")
            return True
        except Exception:
            self.logger.exception("File upload failed.")
            return False

    def object_exists_in_bucket(self, bucket_name, object_key):
        """Return True if an object with key ``object_key`` exists in the
        bucket ``bucket_name``."""
        bucket = self.s3_connection.Bucket(name=bucket_name)
        # Generator short-circuits at the first match instead of
        # materializing every object in the bucket first.
        return any(o.key == object_key for o in bucket.objects.all())

    def delete_s3_bucket(self, bucket_name):
        """Delete an (empty) S3 bucket; returns True/False.

        TODO: Force delete - delete all objects and then bucket
        """
        self.logger.info("Trying to delete bucket '%s'", bucket_name)
        try:
            self.s3_connection.Bucket(bucket_name).delete()
            self.logger.info("Success: bucket '%s' was deleted.", bucket_name)
            return True
        except Exception:
            self.logger.exception("Bucket '%s' deletion failed", bucket_name)
            return False

    def delete_objects_from_s3_bucket(self, bucket_name, object_keys):
        """Delete each of the given object_keys from the given bucket."""
        if not isinstance(object_keys, list):
            raise ValueError(
                "object_keys argument must be a list of key strings")
        bucket = self.s3_connection.Bucket(name=bucket_name)
        try:
            delete_spec = {
                'Objects': [{'Key': key} for key in object_keys]
            }
            bucket.delete_objects(Delete=delete_spec)
            return True
        except Exception:
            self.logger.exception(
                "Deleting object keys %s from Bucket '%s' failed", object_keys,
                bucket_name)
            return False

    def get_all_disassociated_addresses(self):
        """Return elastic IPs attached to neither an instance nor an ENI."""
        free_addresses = []
        for addr in self.api.get_all_addresses():
            if not (addr.instance_id or addr.network_interface_id):
                free_addresses.append(addr)
        return free_addresses

    def release_vpc_address(self, alloc_id):
        """Release a VPC elastic IP by allocation id; True/False result."""
        self.logger.info(" Releasing EC2 VPC EIP '%s'", str(alloc_id))
        try:
            self.api.release_address(allocation_id=alloc_id)
        except ActionTimedOutError:
            return False
        return True

    def release_address(self, address):
        """Release an EC2-Classic elastic IP by public address; True/False."""
        self.logger.info(" Releasing EC2-CLASSIC EIP '%s'", address)
        try:
            self.api.release_address(public_ip=address)
        except ActionTimedOutError:
            return False
        return True

    def get_all_unattached_volumes(self):
        """Return EBS volumes that are not attached to any instance."""
        unattached = []
        for vol in self.api.get_all_volumes():
            if not vol.attach_data.status:
                unattached.append(vol)
        return unattached

    def delete_sqs_queue(self, queue_name):
        """Delete the named SQS queue; True if deleted, False otherwise."""
        self.logger.info(" Deleting SQS queue '%s'", queue_name)
        try:
            queue = self.sqs_connection.get_queue(queue_name=queue_name)
            if not queue:
                return False
            self.sqs_connection.delete_queue(queue=queue)
            return True
        except ActionTimedOutError:
            return False

    def get_all_unused_loadbalancers(self):
        """Return ELB load balancers with no registered instances."""
        unused = []
        for balancer in self.elb_connection.get_all_load_balancers():
            if not balancer.instances:
                unused.append(balancer)
        return unused

    def delete_loadbalancer(self, loadbalancer):
        """Delete the given ELB load balancer object; True/False result."""
        self.logger.info(" Deleting Elastic Load Balancer '%s'",
                         loadbalancer.name)
        try:
            self.elb_connection.delete_load_balancer(loadbalancer.name)
        except ActionTimedOutError:
            return False
        return True

    def get_all_unused_network_interfaces(self):
        """Return network interfaces whose status is 'available'
        (i.e. unattached)."""
        unused = []
        for eni in self.api.get_all_network_interfaces():
            if eni.status == "available":
                unused.append(eni)
        return unused

    def import_image(self, s3bucket, s3key, format="vhd", description=None):
        """
        Start an EC2 image-import task from a disk image stored in S3.

        Args:
            s3bucket: bucket holding the disk image
            s3key: key of the disk image object
            format: disk image format, defaults to "vhd".
                (Shadows the ``format`` builtin; name kept for API compat.)
            description: task description; defaults to s3key when not given

        Returns:
            The ImportTaskId string on success, False on failure.
        """
        self.logger.info(
            " Importing image %s from %s bucket with description %s in %s started successfully.",
            s3key, s3bucket, description, format)
        try:
            result = self.ec2_connection.import_image(DiskContainers=[{
                'Description':
                description if description is not None else s3key,
                'Format':
                format,
                'UserBucket': {
                    'S3Bucket': s3bucket,
                    'S3Key': s3key
                }
            }])
            task_id = result.get("ImportTaskId")
            return task_id

        except Exception:
            self.logger.exception("Import of image '%s' failed.", s3key)
            return False

    def copy_image(self, source_region, source_image, image_id):
        """
        Copy an AMI from another region into this connection's region.

        Args:
            source_region: region the source image lives in
            source_image: id (ami-...) of the image to copy
            image_id: name to give the copied image

        Returns:
            The new image's id on success, False on failure.
        """
        self.logger.info(
            " Copying image %s from region %s to region %s with image id %s",
            source_image, source_region, self.kwargs.get('region'), image_id)
        try:
            # boto3 client calls return plain dicts, not objects. The old
            # attribute access (`.image_id`) always raised AttributeError,
            # which made this method return False even after the copy had
            # actually started. (Local also renamed: it shadowed the method.)
            response = self.ec2_connection.copy_image(
                SourceRegion=source_region,
                SourceImageId=source_image,
                Name=image_id)
            return response['ImageId']

        except Exception:
            self.logger.exception("Copy of image '%s' failed.", source_image)
            return False

    def get_import_image_task(self, task_id):
        """Return the first ImportImageTasks entry for ``task_id``."""
        response = self.ec2_connection.describe_import_image_tasks(
            ImportTaskIds=[task_id])
        return response.get("ImportImageTasks")[0]

    def get_image_id_if_import_completed(self, task_id):
        """Return the imported ImageId once the task is 'completed',
        otherwise False."""
        task = self.get_import_image_task(task_id)
        if task.get("Status") != 'completed':
            return False
        return task.get("ImageId")

    def list_topics(self):
        """Return the raw SNS ListTopics response (a dict with 'Topics')."""
        return self.sns_connection.list_topics()

    def get_arn_if_topic_exists(self, topic_name):
        """
        Return the ARN of the SNS topic named ``topic_name``, or False.

        There is no way to get topic_name directly, so it has to be parsed
        from the ARN, which looks like this:
        arn:aws:sns:sa-east-1:ACCOUNT_NUM:AWSConfig_topic
        """
        for topic in self.list_topics().get('Topics'):
            arn = topic.get('TopicArn')
            # The topic name is the last colon-separated ARN component.
            if arn.split(':')[-1] == topic_name:
                return arn
        return False

    def delete_topic(self, arn):
        """Delete an SNS topic by ARN; True on success, False on failure."""
        self.logger.info(" Deleting SNS Topic '%s'", arn)
        try:
            self.sns_connection.delete_topic(TopicArn=arn)
        except Exception:
            self.logger.exception("Delete of topic '%s' failed.", arn)
            return False
        return True

    def volume_exists_and_available(self, volume_name=None, volume_id=None):
        """
        Method for checking existence and availability state for volume

        Args:
            volume_name: Name of volume, if not set volume_id must be set
            volume_id: ID of volume in format vol-random_chars, if not set volume_name must be set

        Returns:
            True if volume exists and is available.
            False if volume doesn't exist or is not available.

        Raises:
            TypeError: when neither argument is supplied.
        """
        if volume_id:
            try:
                response = self.ec2_connection.describe_volumes(
                    VolumeIds=[volume_id],
                    Filters=[{
                        'Name': 'status',
                        'Values': ['available']
                    }])
                if response.get('Volumes'):
                    return True
                else:
                    return False
            except Exception:
                # describe_volumes raises for unknown/malformed ids;
                # treat any API error as "does not exist".
                return False
        elif volume_name:
            response = self.ec2_connection.describe_volumes(
                Filters=[{
                    'Name': 'status',
                    'Values': ['available']
                }, {
                    'Name': 'tag:Name',
                    'Values': [volume_name]
                }])
            if response.get('Volumes'):
                return True
            else:
                return False
        else:
            raise TypeError(
                "Neither volume_name nor volume_id were specified.")

    def snapshot_exists(self, snapshot_name=None, snapshot_id=None):
        """
        Check whether an EBS snapshot exists, by id or by its 'Name' tag.

        Args:
            snapshot_name: Name of snapshot, if not set snapshot_id must be set.
            snapshot_id: Id of snapshot in format snap-random_chars, if not
                set snapshot_name must be set.

        Returns:
            True if snapshot exists, False otherwise.

        Raises:
            TypeError: when neither argument is supplied.
        """
        if snapshot_id:
            try:
                found = self.ec2_connection.describe_snapshots(
                    SnapshotIds=[snapshot_id]).get('Snapshots')
            except Exception:
                # Unknown/malformed ids raise; report the snapshot as absent.
                return False
            return bool(found)
        if snapshot_name:
            found = self.ec2_connection.describe_snapshots(
                Filters=[{
                    'Name': 'tag:Name',
                    'Values': [snapshot_name]
                }]).get('Snapshots')
            return bool(found)
        raise TypeError(
            "Neither snapshot_name nor snapshot_id were specified.")

    def copy_snapshot(self, source_snapshot_id, source_region=None):
        """
        Start a snapshot copy within this region.

        This method is not working properly because of bug in boto3.
        It creates new snapshot with empty size and error.

        Args:
            source_snapshot_id: Id of source snapshot in format snap-random_chars
            source_region: Source region, if not set then ec2_connection region

        Returns:
            True when snapshot copy started successfully.
            False when snapshot copy didn't start.
        """
        region = source_region or self.kwargs.get('region')
        try:
            self.ec2_connection.copy_snapshot(
                SourceRegion=region,
                SourceSnapshotId=source_snapshot_id,
                DestinationRegion=region)
        except Exception:
            self.logger.exception("Copy snapshot with id '%s' failed.",
                                  source_snapshot_id)
            return False
        return True

    def list_load_balancer(self):
        """Return the names of all ELB load balancers."""
        self.logger.info("Attempting to List EC2 Load Balancers")
        names = []
        for balancer in self.elb_connection.get_all_load_balancers():
            names.append(balancer.name)
        return names

    def list_network(self):
        """Return VPC ids, discovered via network ACLs.

        The EC2 API does not return the tags of the networks here, so only
        the ids are returned.
        """
        self.logger.info("Attempting to List EC2 Virtual Private Networks")
        acls = self.ec2_connection.describe_network_acls()['NetworkAcls']
        return [acl['VpcId'] for acl in acls]

    def list_subnet(self):
        """
        Return display names for all EC2 subnets.

        A subnet's name is the value of its 'Name' tag when present;
        untagged subnets fall back to their SubnetId (mirroring what CFME
        displays).
        """
        self.logger.info("Attempting to List EC2 Subnets")
        subnets = self.ec2_connection.describe_subnets()['Subnets']
        subnets_names = []

        for subnet in subnets:
            if subnet.get('Tags'):
                for tag in subnet['Tags']:
                    # Match on the tag *key*: the previous check,
                    # `'Name' in tag.values()`, also matched tags whose
                    # value (not key) happened to be the string 'Name'.
                    if tag.get('Key') == 'Name':
                        subnets_names.append(tag['Value'])
            else:
                subnets_names.append(subnet['SubnetId'])
        return subnets_names

    def list_security_group(self):
        """Return the names of all EC2 security groups."""
        self.logger.info("Attempting to List EC2 security groups")
        names = []
        for group in self.api.get_all_security_groups():
            names.append(group.name)
        return names

    def list_router(self):
        """
        Return display names for all VPC route tables.

        A route table's name is the value of its 'Name' tag when present;
        untagged route tables fall back to their RouteTableId (mirroring
        what CFME displays).
        """
        route_tables = self.ec2_connection.describe_route_tables(
        )['RouteTables']
        routers_names = []

        for route in route_tables:
            # 'Tags' may be missing entirely on untagged route tables
            # (the sibling list_subnet guards the same way); the previous
            # route['Tags'] lookup raised KeyError in that case.
            tags = route.get('Tags')
            if tags:
                for tag in tags:
                    # Match on the tag *key*: `'Name' in tag.values()` also
                    # matched tags whose value happened to be 'Name'.
                    if tag.get('Key') == 'Name':
                        routers_names.append(tag['Value'])
            else:
                routers_names.append(route['RouteTableId'])

        return routers_names