Example #1
 def get_subnets_for_elb(self, scheme=None, env=None):
     """Return the subnet IDs an ELB should be attached to in the given env.

     Internal ELBs get the private subnets; everything else gets the public ones.
     """
     from wrapper.vpc import Vpc
     from misc import Misc
     vpc = Vpc()
     v = vpc.get_vpc_from_env(env=env)
     azs = Misc.get_azs_from_yaml(region=self.region)
     if scheme == "internal":
         avail = "private"
     else:
         avail = "public"
     res = []
     sub = vpc.get_all_subnets(
         filters=[{'Name': 'tag:Availability', 'Values': [avail]},
                  {'Name': 'availabilityZone', 'Values': azs},
                  {'Name': 'vpc-id', 'Values': [v.get('VpcId')]}])
     for s in sub:
         logger.debug("Adding sub: %s" % sub, )
         res.append(s.get('SubnetId'))
     logger.debug("Subnets for elb are: %s" % res, )
     return res
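A minimal usage sketch for the method above; the handler object and the environment value are assumptions, only the scheme/env keyword arguments come from the signature shown:

    # Hypothetical caller; `handler` stands for an instance of the class
    # that defines get_subnets_for_elb (the owning class is not shown here).
    internal_subnets = handler.get_subnets_for_elb(scheme="internal", env="live")
    public_subnets = handler.get_subnets_for_elb(env="live")  # any other scheme selects the public subnets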
Example #2
 def deploy_snappy(self, env, num, dryrun, accountid, newrelic, channelname,
                   devicestring, branch):
     """Provision num benchmark slave instances running the snappy OS image in
     the given environment and point them at the benchmark_master broker."""
     from wrapper.ec2 import Ec2
     from wrapper.vpc import Vpc
     ec2 = Ec2(session=self.session)
     vpc = Vpc(session=self.session)
     vpc_obj = vpc.get_vpc_from_env(env=env)
     num = int(num)
     snappyindex = self.get_snappy_index(num=num,
                                         vpcid=vpc_obj.get('VpcId'))
     lambda_function_args = {
         'env': "infra",
         'puppet_role': 'benchmarkslave',
         'requester': "benchmark",
         'xively_service': "benchmark_slave",
         'customer': "",
         'shutdown': "stop",
         'dry_run': dryrun,
         'base_ami': "benchmarkslave",
         'instance_type': 'c3.xlarge',
         'snappyindex': snappyindex,
         'accountid': accountid,
         'channelname': channelname,
         'newrelic': newrelic,
         'iam': 'infra-benchmarkslave',
         'ebsoptimized': False,
         'monitoring': False,
         'devicestring': devicestring,
         'branch': branch
     }
     lambda_function_args['userdata'] = Misc.get_userdata_for_os(
         ostype="snappy")
     baseami_object = self.get_ami_from_tag(
         puppet_role=lambda_function_args['base_ami'])
     lambda_function_args['baseamiid'] = baseami_object.get('ImageId')
     availability = Misc.get_value_from_array_hash(
         dictlist=baseami_object.get('Tags'), key='Availability')
     lambda_function_args['ostype'] = Misc.get_value_from_array_hash(
         dictlist=baseami_object.get('Tags'), key='Os')
     lambda_function_args['keypair'] = Misc.get_value_from_array_hash(
         dictlist=vpc_obj.get('Tags'), key='Keypair')
     lambda_function_args['instance_name'] = ec2.generate_ec2_unique_name(
         env=env, puppet_role="benchmarkslave", num=num)
     lambda_function_args[
         'securitygroup'] = ec2.get_security_group_ids_for_stack(
             vpcid=vpc_obj.get('VpcId'),
             puppet_role="benchmarkslave",
             ostype=lambda_function_args['ostype'],
             xively_service="benchmark_slave")
     subnets = vpc.get_all_subnets(filters=[{
         'Name': 'tag:Network',
         'Values': [availability]
     }, {
         'Name': 'vpc-id',
         'Values': [vpc_obj.get('VpcId')]
     }])
     lambda_function_args['subnet'] = ec2.get_subnet_with_algorithym(
         puppet_role="benchmarkslave",
         subnets=subnets,
         num=num,
         fillup=False,
         xively_service="benchmark_slave")
     ## Get broker IP address
     broker = ec2.get_ec2_instances(
         filters=[{
             'Name': 'vpc-id',
             'Values': [vpc_obj.get('VpcId')]
         }, {
             'Name': 'tag:Xively_service',
             'Values': ['benchmark_master']
         }, {
             'Name': 'tag:Puppet_role',
             'Values': ['linuxbase']
         }])
     lambda_function_args['broker'] = broker[0].get(
         'PrivateIpAddress') + ":8883"
     instances = Misc.parallel_map_reduce(
         lambda x: self.create_instance_lamdba(args=lambda_function_args),
         lambda x, y: x + [y], xrange(0, num), [])
     return instances
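Both this example and Example #4 finish with Misc.parallel_map_reduce, which maps an instance-creation lambda over the indices 0..num-1 and folds the results into a list. A rough sketch of such a helper, assuming a thread-pool based map (the real Misc implementation is not shown on this page and may differ):

    # Hypothetical stand-in for Misc.parallel_map_reduce: map the items on a
    # thread pool, then fold the mapped values with the reducer function.
    from multiprocessing.pool import ThreadPool

    def parallel_map_reduce(map_fn, reduce_fn, items, initial):
        pool = ThreadPool()
        try:
            mapped = pool.map(map_fn, items)
        finally:
            pool.close()
            pool.join()
        result = initial
        for value in mapped:
            result = reduce_fn(result, value)
        return result

With the reducer used above (lambda x, y: x + [y]) this collects the created instance objects into a single list.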
Example #3
    def launch_auto_scaling_group(self,
                                  env=None,
                                  stack=None,
                                  min_size=None,
                                  max_size=None,
                                  xively_service=None,
                                  requester=None,
                                  load_balancer_name=None,
                                  health_check=None,
                                  health_check_grace_period=None,
                                  availability=None,
                                  customer=None):
        """Create the launch configuration and auto scaling group for the
        given stack, then attach its scaling policy and CloudWatch alarm."""
        a = AutoScaling()
        v = Vpc()
        c = CloudWatch()

        logger.info("Starting creation of auto-scaling group")

        auto_scaling_group_name = a.generate_auto_scaling_group_name(
            env=env, stack=stack, xively_service=xively_service)
        launch_config_name = a.generate_launch_config_name(
            env=env, stack=stack, xively_service=xively_service)
        lc_exists = a.check_launch_config_exists(env=env,
                                                 xively_service=xively_service,
                                                 stack=stack)
        if lc_exists is False:
            logger.info("Starting to Create Launch Configuration: %s" %
                        launch_config_name)
            a.create_launch_config(launch_config_name=launch_config_name,
                                   env=env,
                                   xively_service=xively_service,
                                   stack=stack)
        else:
            logger.info("Launch Configuration %s Already Exists" %
                        launch_config_name)

        vpc = v.get_vpc_from_env(env=env)
        subnet_filter = v.get_all_subnets(
            filters=[{
                "Name": "tag:Availability",
                "Values": [
                    availability,
                ]
            }, {
                "Name": "vpc-id",
                "Values": [
                    vpc.get('VpcId'),
                ]
            }])
        vpc_zones = ""
        for s in subnet_filter:
            vpc_zones += str(s['SubnetId'])
            vpc_zones += str(",")

        tags = [{
            "ResourceId": auto_scaling_group_name,
            "ResourceType": "auto-scaling-group",
            "PropagateAtLaunch": False,
            "Key": "Name",
            "Value": auto_scaling_group_name
        }, {
            "ResourceId": auto_scaling_group_name,
            "ResourceType": "auto-scaling-group",
            "PropagateAtLaunch": True,
            "Key": "Requester",
            "Value": requester
        }, {
            "ResourceId": auto_scaling_group_name,
            "ResourceType": "auto-scaling-group",
            "PropagateAtLaunch": True,
            "Key": "Puppet_role",
            "Value": stack
        }, {
            "ResourceId": auto_scaling_group_name,
            "ResourceType": "auto-scaling-group",
            "PropagateAtLaunch": True,
            "Key": "Xively_service",
            "Value": xively_service
        }, {
            "ResourceId": auto_scaling_group_name,
            "ResourceType": "auto-scaling-group",
            "PropagateAtLaunch": True,
            "Key": "Environment",
            "Value": env
        }, {
            "ResourceId": auto_scaling_group_name,
            "ResourceType": "auto-scaling-group",
            "PropagateAtLaunch": True,
            "Key": "Customer",
            "Value": customer
        }]

        asg = [auto_scaling_group_name]
        asg_exists = a.check_auto_scaling_group_exists(
            auto_scaling_group_name=asg)
        if asg_exists is False:
            logger.info("Starting to Create Auto Scaling Group: %s" %
                        launch_config_name)
            a.run_auto_scaling_group(
                auto_scaling_group_name=auto_scaling_group_name,
                min_size=min_size,
                max_size=max_size,
                launch_config_name=launch_config_name,
                load_balancer_name=load_balancer_name,
                health_check=health_check,
                health_check_grace_period=health_check_grace_period,
                vpc_zones=vpc_zones,
                tags=tags)
            # resp = a.get_status_auto_scaling_group(auto_scaling_group_name=auto_scaling_group_name)
            # logger.info(resp)
        else:
            logger.info("Auto Scaling Group %s Already Exists" %
                        launch_config_name)

        a.create_scaling_policy(env=env,
                                stack=stack,
                                xively_service=xively_service)
        c.create_alarm_for_auto_scaling_group(env=env,
                                              stack=stack,
                                              xively_service=xively_service)
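The six tag dictionaries built in this example differ only in their Key, Value and PropagateAtLaunch fields. A hypothetical refactor sketch that produces the same list from a small table; the helper name is illustrative and the variables are the method's own parameters:

    # Hypothetical helper: expand (key, value, propagate) rows into the
    # auto-scaling-group tag dictionaries shown above.
    def build_asg_tags(resource_id, rows):
        return [{
            "ResourceId": resource_id,
            "ResourceType": "auto-scaling-group",
            "PropagateAtLaunch": propagate,
            "Key": key,
            "Value": value
        } for key, value, propagate in rows]

    tags = build_asg_tags(auto_scaling_group_name, [
        ("Name", auto_scaling_group_name, False),
        ("Requester", requester, True),
        ("Puppet_role", stack, True),
        ("Xively_service", xively_service, True),
        ("Environment", env, True),
        ("Customer", customer, True),
    ])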
Example #4
    def create_ec2_instance(self,
                            puppet_role,
                            env,
                            requester,
                            customer,
                            xively_service,
                            base_ami,
                            iam,
                            instance_type,
                            dry_run,
                            shutdown,
                            monitoring,
                            fillup,
                            num,
                            keypair,
                            availability=None):
        """
        This function creates an ec2 instance
        :param puppet_role: the Puppet_role that should be used
        :param env:  the environment where we should provision to
        :param requester: the user/team requesting the machine
        :param customer: For future use only
        :param xively_service: the Xively_service that should be used
        :param base_ami: the base_ami that should be used. Can default to puppet_role
        :param iam: The iam role that should be attached, defaults to ec2-base
        :param instance_type: the type of instance requested
        :param dry_run: No changes should be done
        :param shutdown: The shutdown behavior to use
        :param monitoring: Should monitoring be enabled
        :param fillup: Should the fillup algorithm be used instead of round robin
        :param num: the number of instances
        :param keypair: the keypair to use; defaults to the VPC's Keypair tag
        :param availability: the subnet Availability to target; defaults to the AMI's Availability tag
        :return: a list of instance objects
        """
        from wrapper.ec2 import Ec2
        from wrapper.vpc import Vpc
        from misc import Misc
        from core.stackdata import stackdata
        stackdata_object = stackdata(session=self.session)
        ec2 = Ec2(session=self.session)
        vpc = Vpc(session=self.session)
        lambda_function_args = {
            'env': env,
            'puppet_role': puppet_role,
            'requester': requester,
            'xively_service': xively_service,
            'customer': customer,
            'shutdown': shutdown,
            'dry_run': dry_run
        }
        stack_data = stackdata_object.get_stack_data(
            puppet_role=puppet_role, xively_service=xively_service)
        vpc_obj = vpc.get_vpc_from_env(env=env)
        ## Find the baseami object that needs to be used
        if not base_ami:
            if 'ami' in stack_data:
                base_ami = stack_data['ami']
            else:
                logger.info("Falling back to puppet_role as AMI name")
                base_ami = puppet_role
        logger.info("The baseami that is going to be used: %s" % (base_ami, ))
        baseami_object = self.get_ami_from_tag(puppet_role=base_ami)

        ## Get values for lambda function
        lambda_function_args['baseamiid'] = baseami_object.get('ImageId')
        if availability is None:
            availability = Misc.get_value_from_array_hash(
                dictlist=baseami_object.get('Tags'), key='Availability')
        lambda_function_args['ostype'] = Misc.get_value_from_array_hash(
            dictlist=baseami_object.get('Tags'), key='Os')
        if keypair is not None:
            lambda_function_args['keypair'] = keypair
        else:
            lambda_function_args['keypair'] = Misc.get_value_from_array_hash(
                dictlist=vpc_obj.get('Tags'), key='Keypair')

        ## Find the instance_type that needs to be used
        if instance_type:
            inst_type_final = instance_type
        elif 'instance_type' in stack_data and env in stack_data[
                'instance_type']:
            inst_type_final = stack_data['instance_type'][env]
        else:
            inst_type_final = Misc.get_value_from_array_hash(
                dictlist=baseami_object.get('Tags'), key='Instancetype')
        logger.info("Instance type that will be used: %s" %
                    (inst_type_final, ))
        lambda_function_args['instance_type'] = inst_type_final

        ## Find the instance profile that needs to be used
        if iam:
            iam_name = iam
        elif 'iam' in stack_data:
            iam_name = "%s-%s" % (env, stack_data['iam']['name_postfix'])
        else:
            iam_name = "ec2-base"
        logger.info("Base iam instance profile name: %s" % (iam_name, ))
        lambda_function_args['iam'] = iam_name

        ## Find value for ebsoptimized
        if 'ebsoptimized' in stack_data and env in stack_data['ebsoptimized']:
            lambda_function_args['ebsoptimized'] = Misc.str2bool(
                stack_data['ebsoptimized'][env])
        else:
            lambda_function_args['ebsoptimized'] = False

        ## Find value for monitoring enablement
        if monitoring:
            lambda_function_args['monitoring'] = monitoring
        elif 'monitoring' in stack_data and env in stack_data['monitoring']:
            lambda_function_args['monitoring'] = Misc.str2bool(
                stack_data['monitoring'][env])
        else:
            lambda_function_args['monitoring'] = False

        ## Generate instance names for all required instances
        lambda_function_args['instance_name'] = ec2.generate_ec2_unique_name(
            env=env, puppet_role=puppet_role, num=num)
        ## Gather all security groups needed for creating an instance stack
        lambda_function_args[
            'securitygroup'] = ec2.get_security_group_ids_for_stack(
                vpcid=vpc_obj.get('VpcId'),
                puppet_role=puppet_role,
                ostype=lambda_function_args['ostype'],
                xively_service=xively_service)
        # We need to retrieve the subnets from Vpc object, and pass it to Ec2 object
        subnets = vpc.get_all_subnets(filters=[{
            'Name': 'tag:Network',
            'Values': [availability]
        }, {
            'Name': 'vpc-id',
            'Values': [vpc_obj.get('VpcId')]
        }])
        lambda_function_args['subnet'] = ec2.get_subnet_with_algorithym(
            puppet_role=puppet_role,
            subnets=subnets,
            num=num,
            fillup=fillup,
            xively_service=xively_service)
        instances = Misc.parallel_map_reduce(
            lambda x: self.create_instance_lamdba(args=lambda_function_args),
            lambda x, y: x + [y], xrange(0, num), [])
        return instances
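A hypothetical call to the method above, with values drawn from the docstring and from Example #2 on this page; the provisioner object and the concrete values are illustrative only:

    # Hypothetical caller; `provisioner` stands for an instance of the class
    # that defines create_ec2_instance.
    instances = provisioner.create_ec2_instance(
        puppet_role="linuxbase",
        env="infra",
        requester="benchmark",
        customer="",
        xively_service="benchmark_slave",
        base_ami=None,        # falls back to stack data, then to puppet_role
        iam=None,             # defaults to ec2-base
        instance_type=None,   # resolved from stack data or the AMI's tags
        dry_run=True,
        shutdown="stop",
        monitoring=False,
        fillup=False,
        num=2,
        keypair=None,         # taken from the VPC's Keypair tag
        availability=None)    # taken from the AMI's Availability tag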