Example 1
 def route53_info(self, env=None):
     envs = super(awsrequests, self).get_needed_envs(env=env)
     v = Vpc()
     r = Route53()
     res = []
     for environment in envs:
         logger.debug("Working in env: %s" % environment)
         vpc = v.get_vpc_from_env(env=environment)
         domain = Misc.get_value_from_array_hash(dictlist=vpc.get('Tags'),
                                                 key="Domain")
         zoneid = r.get_zoneid_from_domain(domain=domain)
         records = r.list_zone_records(zoneid=zoneid)
         for record in records:
             rec = record.pop('ResourceRecords', [])
             values = []
             for rr in rec:
                 values.append(rr['Value'])
             record['Values'] = values
             if 'AliasTarget' in record:
                 aliastarget = record.pop('AliasTarget')
                 record['TTL'] = 'alias'
                 record['Values'] = [aliastarget['DNSName']]
             record['Env'] = environment
             res.append(record)
             logger.debug("Processed record is: %s" % record, )
     return res
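The per-record reshaping done in the inner loop above can be exercised on plain dicts; a minimal sketch with hypothetical sample records whose keys mirror the record shape used in the snippet:

# Standalone sketch of the record reshaping in route53_info's inner loop.
# The sample record dicts below are hypothetical.
def reshape_record(record, environment):
    values = [rr['Value'] for rr in record.pop('ResourceRecords', [])]
    record['Values'] = values
    if 'AliasTarget' in record:
        aliastarget = record.pop('AliasTarget')
        record['TTL'] = 'alias'
        record['Values'] = [aliastarget['DNSName']]
    record['Env'] = environment
    return record

print(reshape_record(
    {'Name': 'app.example.com.', 'Type': 'A',
     'AliasTarget': {'DNSName': 'my-elb.eu-west-1.elb.amazonaws.com.'}},
    'dev'))
print(reshape_record(
    {'Name': 'db.example.com.', 'Type': 'CNAME', 'TTL': 300,
     'ResourceRecords': [{'Value': 'db-1.example.com.'}]},
    'dev'))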
Example 2
 def get_active_envs(self):
     """
     This function returns all active VPC environments in an account
     :return: a list of the active environment names
     """
     from wrapper.vpc import Vpc
     vpc = Vpc(session=self.session)
     return vpc.get_active_envs()
Example 3
 def get_elb_from_env(self, env=None):
     v = Vpc()
     vpc = v.get_vpc_from_env(env=env)
     elbs = self.get_all_elbs()
     ret = []
     for elb in elbs:
         if elb['VPCId'] == vpc.get('VpcId'):
             ret.append(elb)
     logger.debug("Elbs in env %s : %s" % (env, ret))
     return ret
Example 4
 def get_sgs_for_elb(self, env=None, name=None):
     ec2 = Ec2()
     vpc = Vpc()
     v = vpc.get_vpc_from_env(env=env)
     sgs = ec2.get_security_groups(
         filters=[{'Name': 'tag:ELB', 'Values': [name]}, {'Name': 'vpc-id', 'Values': [v.get('VpcId')]}])
     res = []
     for sg in sgs:
         res.append(sg.get('GroupId'))
     logger.debug("Sgs for the elb are %s" % res, )
     return res
Example 5
 def information_vpc(self, columns, filters):
     from wrapper.vpc import Vpc
     from misc import Misc
     vpc = Vpc(session=self.session)
     result = vpc.information_vpc(filters=filters)
     ret = []
     for instance in result:
         ret.append(
             Misc.parse_object(object=instance,
                               columns=columns,
                               service="vpc"))
     return ret
Example 6
 def get_server_cert_for_env(self, env=None):
     certs = self.get_server_certs()
     v = Vpc()
     vpc = v.get_vpc_from_env(env=env)
     domain = Misc.get_value_from_array_hash(dictlist=vpc['Tags'], key='Domain')
     cert_name = "star." + domain
     logger.debug("Searching for cert domain %s" % cert_name, )
     for c in certs:
         logger.debug("Investigateing Cert %s" % c, )
         if c['ServerCertificateName'] == cert_name:
             logger.debug("We found the server certificate we are looking for")
             return c
     logger.warning("Could not find the certificate for %s" % env, )
     return None
Example 7
 def info_all(self):
     elbs = self.get_all_elbs()
     result = []
     yaml = Misc.get_aws_yaml(yaml_file="elb")
     V = Vpc()
     for lb in elbs:
         # LoadBalancerName convention: <env>-<short env>-<stack>
         tmp = lb['LoadBalancerName'].split('-')
         elb_env = short_env = elb_stack = None
         if len(tmp) >= 3:
             elb_env = tmp.pop(0)
             short_env = tmp.pop(0)
             elb_stack = "-".join(tmp)
         elb_facing = lb['Scheme']
         elb_dns_name = None
         if elb_stack and elb_stack in yaml and elb_facing in yaml[elb_stack]:
             yaml_info = yaml[elb_stack][elb_facing]
             v = V.get_vpc_from_env(env=elb_env)
             domain = Misc.get_value_from_array_hash(dictlist=v.get('Tags'), key="Domain")
             if elb_facing in ("internet-facing", "internal"):
                 elb_dns_name = yaml_info['dns'] + "." + Misc.change_domain_to_local(domain=domain)
         info = {}
         if elb_dns_name is not None:
             info['DNS cname'] = elb_dns_name
         else:
             info['DNS cname'] = "This elb is not in automatisation framework. Will be decomissioned"
         info['Xively_service'] = Misc.get_value_from_array_hash(dictlist=lb.get('Tags'), key="Xively_service")
         info['Puppet_role'] = Misc.get_value_from_array_hash(dictlist=lb.get('Tags'), key="Puppet_role")
         info['Env'] = Misc.get_value_from_array_hash(dictlist=lb.get('Tags'), key="Environment")
         info['Real endpoint'] = lb['DNSName']
         info['Vpcid'] = lb['VPCId']
         info['Name'] = lb['LoadBalancerName']
         info['CreateTime'] = lb['CreatedTime'].strftime("%Y-%m-%d %H:%M")
         info['Facing'] = elb_facing
         info['Availability Zones'] = lb['AvailabilityZones']
         info['Securitygroups'] = lb['SecurityGroups']
         instance = []
         for i in lb['Instances']:
             instance.append(i['InstanceId'])
         info['InstanceIds'] = instance
         listen = []
         for listener in lb['ListenerDescriptions']:
             listener = listener['Listener']
             listen.append(
                 "%s-%s-%s" % (listener['LoadBalancerPort'], listener['InstancePort'], listener['Protocol']))
         info['From-To-Protocol'] = listen
         result.append(info)
     return result
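The naming convention assumed above ('<env>-<short env>-<stack>', where the stack part may itself contain dashes) can be checked in isolation; a small sketch with a made-up load balancer name:

# Sketch of the LoadBalancerName parsing used in info_all (hypothetical name).
name = "staging-stg-frontend-api"
tmp = name.split('-')
elb_env = short_env = elb_stack = None
if len(tmp) >= 3:
    elb_env = tmp.pop(0)       # "staging"
    short_env = tmp.pop(0)     # "stg"
    elb_stack = "-".join(tmp)  # "frontend-api"
print("%s %s %s" % (elb_env, short_env, elb_stack))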
Example 8
 def get_subnets_for_elb(self, scheme=None, env=None):
     vpc = Vpc()
     v = vpc.get_vpc_from_env(env=env)
     azs = Misc.get_azs_from_yaml(region=self.region)
     if scheme == "internal":
         avail = "private"
     else:
         avail = "public"
     res = []
     sub = vpc.get_all_subnets(
         filters=[{'Name': 'tag:Availability', 'Values': [avail]}, {'Name': 'availabilityZone', 'Values': azs},
                  {'Name': 'vpc-id', 'Values': [v.get('VpcId')]}])
     for s in sub:
         logger.debug("Adding sub: %s" % sub, )
         res.append(s.get('SubnetId'))
     logger.debug("Subnets for elb are: %s" % res, )
     return res
Example 9
 def deploy_stack_to_env(self, env, file, dryrun):
     stack_json = Misc.parse_file_to_json(file=file)
     from misc import Validator
     stack_json = Validator.validate_kerrigan_json(stack_data=stack_json,
                                                   env=env)
     if 'cloudformation' in stack_json:
         cloudformation_json = stack_json.pop("cloudformation")
         from misc import Translater
         from wrapper.vpc import Vpc
         from wrapper.cloudformation import Cloudformation
         vpc = Vpc(session=self.session)
         cloudformation = Cloudformation(session=self.session)
         env_vpc = vpc.get_vpc_from_env(env=env)
         env_cidr = env_vpc['CidrBlock']
         ami = self.get_ami_from_tag(puppet_role=stack_json['ami'])
     else:
         cloudformation_json = None
     self.upload_stack_to_dynamodb(item=stack_json)
     # Do changes from kerrigan
     if cloudformation_json:
         logger.info(msg="Doing security group transformations")
         cloudformation_json = Translater.translate_security_group_ip_address_in_cloudformation(
             cloudformation_json=cloudformation_json, env_cidr=env_cidr)
         cloudformation_parameters = Translater.gather_information_for_cloudofrmation_parameters(
             stack_data=stack_json, vpc=env_vpc, ami=ami)
     # Do pre kerrigan tasks
     # Do cloudformation
     if cloudformation_json:
         stackname = "%s-%s-%s" % (env, stack_json['puppet_role'],
                                   stack_json['xively_service'])
         if cloudformation.stack_exists(stackname=stackname):
             cloudformation.update_stack(
                 stackname=stackname,
                 templatebody=cloudformation_json,
                 dryrun=dryrun,
                 parameters=cloudformation_parameters)
         else:
             cloudformation.create_stack(
                 stackname=stackname,
                 templatebody=cloudformation_json,
                 dryrun=dryrun,
                 parameters=cloudformation_parameters)
Example 10
 def envs_with_domains(self):
     v = Vpc()
     envs = v.get_active_envs()
     res = {}
     for env in envs:
         res[env] = []
         vpc = v.get_vpc_from_env(env=env)
         domain = Misc.get_value_from_array_hash(dictlist=vpc.get('Tags'),
                                                 key='Domain')
         res[env].append(domain)
         logger.debug('Working on env %s and domain %s' % (env, domain))
         split_domain = Misc.string_to_array(string=domain)
         last_dn = split_domain.pop()
         # local entry
         if env == "prod":
             local_tmp = ["prod"] + split_domain + ['local']
         else:
             local_tmp = split_domain + ['local']
         res[env].append(
             Misc.join_list_to_string(list=local_tmp, join_with='.'))
     return res
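The domain-to-.local mapping relies on Misc helpers; a rough, dependency-free equivalent, assuming Misc.string_to_array splits on '.' and Misc.join_list_to_string joins with the given separator:

# Rough sketch of the ".local" derivation above, with plain string operations.
def local_domain(env, domain):
    split_domain = domain.split('.')
    split_domain.pop()                       # drop the last (public) label
    if env == "prod":
        local_tmp = ["prod"] + split_domain + ['local']
    else:
        local_tmp = split_domain + ['local']
    return ".".join(local_tmp)

print(local_domain("dev", "dev.example.com"))   # dev.example.local
print(local_domain("prod", "example.com"))      # prod.example.local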
Example 11
 def get_needed_envs(self, env=None):
     '''
     This function returns all valid environments; if nothing is requested, all environments in the account are returned
     :param env: a comma-separated list of environments to check
     :type env: str (comma-separated)
     :return: list of valid environments
     :rtype: list
     '''
     v = Vpc()
     active = v.get_active_envs()
     if env:
         logger.debug("Env is defined, parsing %s" % env,)
         e = env.split(',')
         envs = []
         for i in e:
             if i in active:
                 envs.append(i)
     else:
         logger.debug("No specific env environment was requested, providing all")
         envs = active
     logger.info("Going to iterate over envs: %s" % envs,)
     return envs
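The filtering against the active environments reduces to a membership check on each comma-separated token; a standalone sketch with made-up environment names:

# Standalone sketch of the env filtering in get_needed_envs (hypothetical env names).
active = ["dev", "staging", "prod"]

def needed_envs(env=None):
    if env:
        return [i for i in env.split(',') if i in active]
    return active

print(needed_envs("dev,prod,bogus"))  # ['dev', 'prod']
print(needed_envs())                  # ['dev', 'staging', 'prod']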
Example 12
    def create_launch_config(self,
                             launch_config_name=None,
                             env=None,
                             xively_service=None,
                             stack=None):
        e = Ec2()
        v = Vpc()
        vpc = v.get_vpc_from_env(env=env)
        keyname = Misc.get_value_from_array_hash(dictlist=vpc['Tags'],
                                                 key='Keypair')
        baseami = e.query_base_image(stack=stack)
        ostype = Misc.get_value_from_array_hash(dictlist=baseami['Tags'],
                                                key='Os')
        instance_type = Misc.get_value_from_array_hash(
            dictlist=baseami['Tags'], key='Instancetype')
        image = baseami.get('ImageId')
        sgs = e.get_security_group_ids_for_launch(
            vpcid=vpc.get('VpcId'),
            stack=stack,
            ostype=ostype,
            xively_service=xively_service)
        iam = "ec2"
        y = Misc.get_app_ports_yaml('app_ports')
        port = y[xively_service]
        userdata = Misc.get_autoscale_userdata_for_os(ostype=ostype).format(
            env=env, stack=stack, xively_service=xively_service, port=port)
        monitoring = {}
        monitoring['Enabled'] = True

        self.autoscale.create_launch_configuration(
            LaunchConfigurationName=launch_config_name,
            ImageId=image,
            KeyName=keyname,
            SecurityGroups=sgs,
            UserData=userdata,
            InstanceType=instance_type,
            InstanceMonitoring=monitoring,
            IamInstanceProfile=iam)
Example 13
 def get_vpcid_from_env(self, env=None):
     v = Vpc()
     vpc = v.get_vpc_from_env(env=env)
     return vpc.get('VpcId')
Example 14
 def deploy_snappy(self, env, num, dryrun, accountid, newrelic, channelname,
                   devicestring, branch):
     from wrapper.ec2 import Ec2
     from wrapper.vpc import Vpc
     ec2 = Ec2(session=self.session)
     vpc = Vpc(session=self.session)
     vpc_obj = vpc.get_vpc_from_env(env=env)
     num = int(num)
     snappyindex = self.get_snappy_index(num=num,
                                         vpcid=vpc_obj.get('VpcId'))
     lambda_function_args = {
         'env': "infra",
         'puppet_role': 'benchmarkslave',
         'requester': "benchmark",
         'xively_service': "benchmark_slave",
         'customer': "",
         'shutdown': "stop",
         'dry_run': dryrun,
         'base_ami': "benchmarkslave",
         'instance_type': 'c3.xlarge',
         'snappyindex': snappyindex,
         'accountid': accountid,
         'channelname': channelname,
         'newrelic': newrelic,
         'iam': 'infra-benchmarkslave',
         'ebsoptimized': False,
         'monitoring': False,
         'devicestring': devicestring,
         'branch': branch
     }
     lambda_function_args['userdata'] = Misc.get_userdata_for_os(
         ostype="snappy")
     baseami_object = self.get_ami_from_tag(
         puppet_role=lambda_function_args['base_ami'])
     lambda_function_args['baseamiid'] = baseami_object.get('ImageId')
     availability = Misc.get_value_from_array_hash(
         dictlist=baseami_object.get('Tags'), key='Availability')
     lambda_function_args['ostype'] = Misc.get_value_from_array_hash(
         dictlist=baseami_object.get('Tags'), key='Os')
     lambda_function_args['keypair'] = Misc.get_value_from_array_hash(
         dictlist=vpc_obj.get('Tags'), key='Keypair')
     lambda_function_args['instance_name'] = ec2.generate_ec2_unique_name(
         env=env, puppet_role="benchmarkslave", num=num)
     lambda_function_args[
         'securitygroup'] = ec2.get_security_group_ids_for_stack(
             vpcid=vpc_obj.get('VpcId'),
             puppet_role="benchmarkslave",
             ostype=lambda_function_args['ostype'],
             xively_service="benchmark_slave")
     subnets = vpc.get_all_subnets(filters=[{
         'Name': 'tag:Network',
         'Values': [availability]
     }, {
         'Name': 'vpc-id',
         'Values': [vpc_obj.get('VpcId')]
     }])
     lambda_function_args['subnet'] = ec2.get_subnet_with_algorithym(
         puppet_role="benchmarkslave",
         subnets=subnets,
         num=num,
         fillup=False,
         xively_service="benchmark_slave")
     ## Get broker IP address
     broker = ec2.get_ec2_instances(
         filters=[{
             'Name': 'vpc-id',
             'Values': [vpc_obj.get('VpcId')]
         }, {
             'Name': 'tag:Xively_service',
             'Values': ['benchmark_master']
         }, {
             'Name': 'tag:Puppet_role',
             'Values': ['linuxbase']
         }])
     lambda_function_args['broker'] = broker[0].get(
         'PrivateIpAddress') + ":8883"
     instances = Misc.parallel_map_reduce(
         lambda x: self.create_instance_lamdba(args=lambda_function_args),
         lambda x, y: x + [y], xrange(0, num), [])
     return instances
Example 15
 def prepare_deployment(self, puppet_role, xively_service, env, num,
                        instance_type, base_ami, iam, requester, customer,
                        dry_run):
     from wrapper.ec2 import Ec2
     from wrapper.vpc import Vpc
     ec2 = Ec2(session=self.session)
     vpc = Vpc(session=self.session)
     vpc_obj = vpc.get_vpc_from_env(env=env)
     filter_base = [{'Name': 'vpc-id', 'Values': [vpc_obj.get('VpcId')]}]
     if xively_service:
         old_machines = ec2.get_ec2_instances(
             filters=filter_base + [{
                 'Name': 'tag:Puppet_role',
                 'Values': [puppet_role]
             }, {
                 'Name': 'tag:Xively_service',
                 'Values': ["%s_old" % xively_service]
             }])
         rollback_machines = ec2.get_ec2_instances(
             filters=filter_base +
             [{
                 'Name': 'tag:Puppet_role',
                 'Values': [puppet_role]
             }, {
                 'Name': 'tag:Xively_service',
                 'Values': ["%s_rollback" % xively_service]
             }])
         current_machines = ec2.get_ec2_instances(
             filters=filter_base + [{
                 'Name': 'tag:Puppet_role',
                 'Values': [puppet_role]
             }, {
                 'Name': 'tag:Xively_service',
                 'Values': [xively_service]
             }])
     else:
         old_machines = ec2.get_ec2_instances(
             filters=filter_base + [{
                 'Name': 'tag:Puppet_role',
                 'Values': ["%s_old" % puppet_role]
             }])
         rollback_machines = ec2.get_ec2_instances(
             filters=filter_base + [{
                 'Name': 'tag:Puppet_role',
                 'Values': ["%s_rollback" % puppet_role]
             }])
         current_machines = ec2.get_ec2_instances(
             filters=filter_base + [{
                 'Name': 'tag:Puppet_role',
                 'Values': [puppet_role]
             }])
     for old_machine in old_machines:
         logger.info(msg="Going to stop old machine %s" %
                     old_machine.get('InstanceId'))
         ec2.stop_instances(dryrun=dry_run,
                            instanceids=[old_machine.get('InstanceId')])
     for rollback_machine in rollback_machines:
         logger.info(msg="Going to stop old machine %s" %
                     rollback_machine.get('InstanceId'))
         ec2.stop_instances(
             dryrun=dry_run,
             instanceids=[rollback_machine.get('InstanceId')])
         if xively_service:
             ec2.tag_resource(
                 instanceid=rollback_machine.get('InstanceId'),
                 tags={'Xively_service': "%s_old" % xively_service})
         else:
             ec2.tag_resource(instanceid=rollback_machine.get('InstanceId'),
                              tags={'Puppet_role': "%s_old" % puppet_role})
     for current_machine in current_machines:
         logger.info(msg="Going to retag current machine %s" %
                     current_machine.get('InstanceId'))
         if xively_service:
             ec2.tag_resource(
                 instanceid=current_machine.get('InstanceId'),
                 tags={'Xively_service': "%s_rollback" % xively_service})
         else:
             ec2.tag_resource(
                 instanceid=current_machine.get('InstanceId'),
                 tags={'Puppet_role': "%s_rollback" % puppet_role})
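The deployment bookkeeping above is a tag rotation: machines tagged with the service name become "<service>_rollback", the previous rollback set becomes "<service>_old" and is stopped, and the old set is stopped outright. A minimal sketch of that rotation on plain dicts (hypothetical instance data):

# Sketch of the tag rotation performed by prepare_deployment (hypothetical data).
instances = [
    {'InstanceId': 'i-1', 'Xively_service': 'api_old'},
    {'InstanceId': 'i-2', 'Xively_service': 'api_rollback'},
    {'InstanceId': 'i-3', 'Xively_service': 'api'},
]

def rotate(instances, service):
    for inst in instances:
        tag = inst['Xively_service']
        if tag in ("%s_old" % service, "%s_rollback" % service):
            inst['State'] = 'stopped'                         # stop old and rollback machines
        if tag == "%s_rollback" % service:
            inst['Xively_service'] = "%s_old" % service       # rollback -> old
        elif tag == service:
            inst['Xively_service'] = "%s_rollback" % service  # current -> rollback
    return instances

print(rotate(instances, "api"))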
Example 16
    def launch_auto_scaling_group(self,
                                  env=None,
                                  stack=None,
                                  min_size=None,
                                  max_size=None,
                                  xively_service=None,
                                  requester=None,
                                  load_balancer_name=None,
                                  health_check=None,
                                  health_check_grace_period=None,
                                  availability=None,
                                  customer=None):
        a = AutoScaling()
        v = Vpc()
        c = CloudWatch()

        logger.info("Starting creation of auto-scaling group")

        auto_scaling_group_name = a.generate_auto_scaling_group_name(
            env=env, stack=stack, xively_service=xively_service)
        launch_config_name = a.generate_launch_config_name(
            env=env, stack=stack, xively_service=xively_service)
        lc_exists = a.check_launch_config_exists(env=env,
                                                 xively_service=xively_service,
                                                 stack=stack)
        if lc_exists is False:
            logger.info("Starting to Create Launch Configuration: %s" %
                        launch_config_name)
            a.create_launch_config(launch_config_name=launch_config_name,
                                   env=env,
                                   xively_service=xively_service,
                                   stack=stack)
        else:
            logger.info("Launch Configuration %s Already Exists" %
                        launch_config_name)

        vpc = v.get_vpc_from_env(env=env)
        subnet_filter = v.get_all_subnets(
            filters=[{
                "Name": "tag:Availability",
                "Values": [
                    availability,
                ]
            }, {
                "Name": "vpc-id",
                "Values": [
                    vpc.get('VpcId'),
                ]
            }])
        vpc_zones = ""
        for s in subnet_filter:
            vpc_zones += str(s['SubnetId'])
            vpc_zones += str(",")

        tags = [{
            "ResourceId": auto_scaling_group_name,
            "ResourceType": "auto-scaling-group",
            "PropagateAtLaunch": False,
            "Key": "Name",
            "Value": auto_scaling_group_name
        }, {
            "ResourceId": auto_scaling_group_name,
            "ResourceType": "auto-scaling-group",
            "PropagateAtLaunch": True,
            "Key": "Requester",
            "Value": requester
        }, {
            "ResourceId": auto_scaling_group_name,
            "ResourceType": "auto-scaling-group",
            "PropagateAtLaunch": True,
            "Key": "Puppet_role",
            "Value": stack
        }, {
            "ResourceId": auto_scaling_group_name,
            "ResourceType": "auto-scaling-group",
            "PropagateAtLaunch": True,
            "Key": "Xively_service",
            "Value": xively_service
        }, {
            "ResourceId": auto_scaling_group_name,
            "ResourceType": "auto-scaling-group",
            "PropagateAtLaunch": True,
            "Key": "Environment",
            "Value": env
        }, {
            "ResourceId": auto_scaling_group_name,
            "ResourceType": "auto-scaling-group",
            "PropagateAtLaunch": True,
            "Key": "Customer",
            "Value": customer
        }]

        asg = []
        asg.append(auto_scaling_group_name)
        asg_exists = a.check_auto_scaling_group_exists(
            auto_scaling_group_name=asg)
        if asg_exists is False:
            logger.info("Starting to Create Auto Scaling Group: %s" %
                        launch_config_name)
            a.run_auto_scaling_group(
                auto_scaling_group_name=auto_scaling_group_name,
                min_size=min_size,
                max_size=max_size,
                launch_config_name=launch_config_name,
                load_balancer_name=load_balancer_name,
                health_check=health_check,
                health_check_grace_period=health_check_grace_period,
                vpc_zones=vpc_zones,
                tags=tags)
        # resp = a.get_status_auto_scaling_group(auto_scaling_group_name=auto_scaling_group_name)
        #            logger.info(resp)
        else:
            logger.info("Auto Scaling Group %s Already Exists" %
                        launch_config_name)

        a.create_scaling_policy(env=env,
                                stack=stack,
                                xively_service=xively_service)
        c.create_alarm_for_auto_scaling_group(env=env,
                                              stack=stack,
                                              xively_service=xively_service)
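The six tag dictionaries above differ only in Key, Value and the PropagateAtLaunch flag; a compact sketch that builds the same list from a small mapping, using the same field names (the example values are hypothetical):

# Sketch: building the same auto-scaling-group tag list from a small mapping.
def build_asg_tags(asg_name, requester, stack, xively_service, env, customer):
    values = [("Name", asg_name, False),
              ("Requester", requester, True),
              ("Puppet_role", stack, True),
              ("Xively_service", xively_service, True),
              ("Environment", env, True),
              ("Customer", customer, True)]
    return [{"ResourceId": asg_name,
             "ResourceType": "auto-scaling-group",
             "PropagateAtLaunch": propagate,
             "Key": key,
             "Value": value} for key, value, propagate in values]

tags = build_asg_tags("dev-frontend-api", "team-x", "frontend", "api", "dev", "")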
Example 17
 def get_envs(self):
     v = Vpc()
     return v.get_active_envs()
Example 18
    def create_ec2_instance(self,
                            puppet_role,
                            env,
                            requester,
                            customer,
                            xively_service,
                            base_ami,
                            iam,
                            instance_type,
                            dry_run,
                            shutdown,
                            monitoring,
                            fillup,
                            num,
                            keypair,
                            availability=None):
        """
        This function creates an ec2 instance
        :param puppet_role: the Puppet_role that should be used
        :param env:  the environment where we should provision to
        :param requester: the user/team requesting the machine
        :param customer: For future use only
        :param xively_service: the Xively_service that should be used
        :param base_ami: the base_ami that should be used. Can default to puppet_role
        :param iam: The iam role that should be attached, defaults to ec2-base
        :param instance_type: the type of instance requested
        :param dry_run: No changes should be done
        :param shutdown: The shutdown behavior to use
        :param monitoring: Should monitoring be enabled
        :param fillup: Should the fillup algorithm be used or round robin
        :param num: the number of instances
        :param keypair: optional key pair name; defaults to the VPC 'Keypair' tag
        :param availability: optional subnet availability to target; defaults to the AMI 'Availability' tag
        :return: a list of instance objects
        """
        from wrapper.ec2 import Ec2
        from wrapper.vpc import Vpc
        from misc import Misc
        from core.stackdata import stackdata
        stackdata_object = stackdata(session=self.session)
        ec2 = Ec2(session=self.session)
        vpc = Vpc(session=self.session)
        lambda_function_args = {
            'env': env,
            'puppet_role': puppet_role,
            'requester': requester,
            'xively_service': xively_service,
            'customer': customer,
            'shutdown': shutdown,
            'dry_run': dry_run
        }
        stack_data = stackdata_object.get_stack_data(
            puppet_role=puppet_role, xively_service=xively_service)
        vpc_obj = vpc.get_vpc_from_env(env=env)
        ## Find the baseami object that needs to be used
        if not base_ami:
            if 'ami' in stack_data:
                base_ami = stack_data['ami']
            else:
                logger.info("Falling back to puppet_role as AMI name")
                base_ami = puppet_role
        logger.info("The baseami that is going to be used: %s" % (base_ami, ))
        baseami_object = self.get_ami_from_tag(puppet_role=base_ami)

        ## Get values for lambda function
        lambda_function_args['baseamiid'] = baseami_object.get('ImageId')
        if availability is None:
            availability = Misc.get_value_from_array_hash(
                dictlist=baseami_object.get('Tags'), key='Availability')
        lambda_function_args['ostype'] = Misc.get_value_from_array_hash(
            dictlist=baseami_object.get('Tags'), key='Os')
        if keypair is not None:
            lambda_function_args['keypair'] = keypair
        else:
            lambda_function_args['keypair'] = Misc.get_value_from_array_hash(
                dictlist=vpc_obj.get('Tags'), key='Keypair')

        ## Find the instance_type that needs to be used
        if instance_type:
            inst_type_final = instance_type
        elif 'instance_type' in stack_data and env in stack_data[
                'instance_type']:
            inst_type_final = stack_data['instance_type'][env]
        else:
            inst_type_final = Misc.get_value_from_array_hash(
                dictlist=baseami_object.get('Tags'), key='Instancetype')
        logger.info("Instance type that will be used: %s" %
                    (inst_type_final, ))
        lambda_function_args['instance_type'] = inst_type_final

        ## Find the instance profile that needs to be used
        if iam:
            iam_name = iam
        elif 'iam' in stack_data:
            iam_name = "%s-%s" % (env, stack_data['iam']['name_postfix'])
        else:
            iam_name = "ec2-base"
        logger.info("Base iam instance profile name: %s" % (iam_name, ))
        lambda_function_args['iam'] = iam_name

        ## Find value for ebsoptimized
        if 'ebsoptimized' in stack_data and env in stack_data['ebsoptimized']:
            lambda_function_args['ebsoptimized'] = Misc.str2bool(
                stack_data['ebsoptimized'][env])
        else:
            lambda_function_args['ebsoptimized'] = False

        ## Find value for monitoring enablement
        if monitoring:
            lambda_function_args['monitoring'] = monitoring
        elif 'monitoring' in stack_data and env in stack_data['monitoring']:
            lambda_function_args['monitoring'] = Misc.str2bool(
                stack_data['monitoring'][env])
        else:
            lambda_function_args['monitoring'] = False

        ## Generate instance names for all required instances
        lambda_function_args['instance_name'] = ec2.generate_ec2_unique_name(
            env=env, puppet_role=puppet_role, num=num)
        ## Gather all security groups needed for creating an instance stack
        lambda_function_args[
            'securitygroup'] = ec2.get_security_group_ids_for_stack(
                vpcid=vpc_obj.get('VpcId'),
                puppet_role=puppet_role,
                ostype=lambda_function_args['ostype'],
                xively_service=xively_service)
        # We need to retrieve the subnets from Vpc object, and pass it to Ec2 object
        subnets = vpc.get_all_subnets(filters=[{
            'Name': 'tag:Network',
            'Values': [availability]
        }, {
            'Name': 'vpc-id',
            'Values': [vpc_obj.get('VpcId')]
        }])
        lambda_function_args['subnet'] = ec2.get_subnet_with_algorithym(
            puppet_role=puppet_role,
            subnets=subnets,
            num=num,
            fillup=fillup,
            xively_service=xively_service)
        instances = Misc.parallel_map_reduce(
            lambda x: self.create_instance_lamdba(args=lambda_function_args),
            lambda x, y: x + [y], xrange(0, num), [])
        return instances
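The instance type selection above follows an explicit-argument, then stack-data, then AMI-tag precedence (the IAM profile is resolved the same way). A compact sketch of that fallback chain with hypothetical inputs:

# Sketch of the fallback precedence used for instance_type in create_ec2_instance.
# The stack_data dict and AMI tag value below are hypothetical.
def pick_instance_type(instance_type, stack_data, env, ami_tag_value):
    if instance_type:
        return instance_type                     # explicit argument wins
    if 'instance_type' in stack_data and env in stack_data['instance_type']:
        return stack_data['instance_type'][env]  # per-env value from stack data
    return ami_tag_value                         # fall back to the AMI 'Instancetype' tag

print(pick_instance_type(None, {'instance_type': {'dev': 't2.small'}}, 'dev', 'm3.medium'))  # t2.small
print(pick_instance_type('c3.large', {}, 'dev', 'm3.medium'))                                # c3.large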