Example #1
    def init_deploymentpipelines_permission(self, permission_config, permissions_by_account):
        """
        Iterates over each pipeline reference and adds its permission config
        to the map of permissions by account.
        """
        for resource in permission_config.resources:
            pipeline_ref = Reference(resource.pipeline)
            account_ref = 'paco.ref ' + '.'.join(pipeline_ref.parts) + '.configuration.account'
            account_ref = self.paco_ctx.get_ref(account_ref)
            account_name = self.paco_ctx.get_ref(account_ref + '.name')
            if permission_config not in permissions_by_account[account_name]:
                permissions_by_account[account_name].append(permission_config)


            # Initialize the network environments that we need access to
            pipeline_config = pipeline_ref.get_model_obj(self.paco_ctx.project)
            self.paco_ctx.get_controller(pipeline_ref.parts[0], model_obj=pipeline_config)

            # Some actions in the pipeline might be in a different account, so we must
            # iterate over the pipeline's stages and actions and add them too.
            for action_name in pipeline_config.source.keys():
                action = pipeline_config.source[action_name]
                account_name = None
                if action.type == 'CodeDeploy.Deploy':
                    asg_ref = Reference(action.auto_scaling_group)
                    asg_config = asg_ref.get_model_obj(self.paco_ctx.project)
                    account_name = self.paco_ctx.get_ref(asg_config.get_account().paco_ref + '.name')
                if account_name != None:
                    if permission_config not in permissions_by_account[account_name]:
                        permissions_by_account[account_name].append(permission_config)
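
The account lookup above is mostly paco.ref string manipulation: the pipeline reference's dotted parts are re-joined, suffixed with '.configuration.account', and the context resolves that twice (once to the account ref, once more for its '.name'). Below is a minimal, self-contained sketch of the string step, assuming a paco.ref has the form 'paco.ref <dotted.path>'; FakeReference and the example ref are illustrative stand-ins, not the real paco API.

# Illustrative stand-in for paco.models.references.Reference, assuming a ref
# string is 'paco.ref ' followed by a dotted path.
class FakeReference:
    def __init__(self, raw_ref):
        self.parts = raw_ref.split(' ')[1].split('.')

# Hypothetical pipeline ref, for illustration only.
pipeline_ref = FakeReference('paco.ref netenv.mynet.dev.us-west-2.applications.myapp.groups.cicd.resources.pipeline')
account_ref = 'paco.ref ' + '.'.join(pipeline_ref.parts) + '.configuration.account'
print(account_ref)
# paco.ref netenv.mynet.dev.us-west-2.applications.myapp.groups.cicd.resources.pipeline.configuration.account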
Example #2
    def create_cfn_ref_list_param(
        self,
        param_type,
        name,
        description,
        value,
        ref_attribute=None,
        default=None,
        noecho=False,
    ):
        "Create a CloudFormation Parameter from a list of refs"
        stack_output_param = StackOutputParam(name, param_template=self)
        for list_item in value:
            if is_ref(list_item):
                if ref_attribute != None:
                    list_item += '.' + ref_attribute
                stack = self.paco_ctx.get_ref(list_item)
                if isinstance(stack, Stack) == False:
                    raise PacoException(
                        PacoErrorCode.Unknown,
                        message="Reference must resolve to a stack")
                stack_output_key = self.stack.get_stack_outputs_key_from_ref(
                    Reference(list_item))
                stack_output_param.add_stack_output(stack, stack_output_key)
            else:
                stack_output_param.add_value(list_item)

        return self.create_cfn_parameter(
            param_type,
            name,
            description,
            stack_output_param,
            default,
            noecho,
        )
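
The helper above accepts a mixed list: items that are paco refs are resolved to stack outputs, while anything else passes through as a literal value. A minimal sketch of that split, using a plain prefix check in place of paco's is_ref(); the input values are hypothetical.

def split_ref_list(values, ref_attribute=None):
    "Separate paco-style refs from literal values, appending an optional attribute to each ref."
    refs, literals = [], []
    for item in values:
        if item.startswith('paco.ref '):  # stand-in for paco's is_ref()
            if ref_attribute is not None:
                item += '.' + ref_attribute
            refs.append(item)
        else:
            literals.append(item)
    return refs, literals

refs, literals = split_ref_list(
    ['paco.ref netenv.mynet.dev.us-west-2.network.vpc.security_groups.app.web', 'sg-0123456789abcdef0'],
    ref_attribute='id',
)
# refs     -> ['paco.ref netenv.mynet.dev.us-west-2.network.vpc.security_groups.app.web.id']
# literals -> ['sg-0123456789abcdef0']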
Example #3
    def get_account_context(self, account_ref=None, account_name=None, netenv_ref=None):
        """
        Get an AccountContext for an AWS Account. Will return an existing object if an AccountContext
        has already been created.

        AccountContext can be specified in three ways:
          account_ref: 'paco.ref accounts.dev'
          account_name: 'dev'
          netenv_ref: 'paco.ref netenv.mynet.dev.us-west-2.applications.myapp' # The 'dev' env is in the dev account
        """
        if account_ref != None:
            ref = Reference(account_ref)
            account_name = ref.parts[1]
        elif netenv_ref != None:
            account_ref = netenv_ref.split(' ')[1]
            account_ref = 'paco.ref netenv.'+'.'.join(account_ref.split('.', 4)[:-1])+".network.aws_account"
            account_ref = self.get_ref(account_ref)
            return self.get_account_context(account_ref=account_ref)
        elif account_name == None:
            raise InvalidAccountName("Get AccountContext failed. The account name provided is None.")

        if account_name in self.accounts:
            return self.accounts[account_name]

        account_ctx = AccountContext(
            paco_ctx=self,
            name=account_name,
            mfa_account=self.master_account,
        )
        self.accounts[account_name] = account_ctx

        return account_ctx
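
Beyond resolving refs, the method doubles as a cache: once an AccountContext exists for an account name, later calls hand back the same object instead of building a new one. A stripped-down sketch of that registry pattern, with stand-in classes rather than the real paco ones:

class FakeAccountContext:
    "Stand-in for paco's AccountContext."
    def __init__(self, name):
        self.name = name

class FakeProjectContext:
    "Stand-in for the paco context object that owns the account registry."
    def __init__(self):
        self.accounts = {}

    def get_account_context(self, account_name):
        # Return a cached context if one exists, otherwise create and cache it.
        if account_name in self.accounts:
            return self.accounts[account_name]
        account_ctx = FakeAccountContext(account_name)
        self.accounts[account_name] = account_ctx
        return account_ctx

ctx = FakeProjectContext()
assert ctx.get_account_context('dev') is ctx.get_account_context('dev')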
Example #4
 def add_github_webhook(self, pipeline_res, stage, action):
     "Add a CodePipeline WebHook"
     logical_id = f'Webhook{stage.name}{action.name}'
     github_access_token = Reference(action.github_access_token).ref
     cfn_export_dict = {
         'Authentication':
         'GITHUB_HMAC',
         'AuthenticationConfiguration': {
             'SecretToken':
             "{{resolve:secretsmanager:%s}}" % github_access_token,
         },
         'Filters': [{
             'JsonPath': "$.ref",
             'MatchEquals': 'refs/heads/{Branch}'
         }],
         'TargetAction':
         f'GitHub{stage.name}{action.name}',
         'RegisterWithThirdParty':
         True,
         'TargetPipeline':
         troposphere.Ref(pipeline_res),
         'TargetPipelineVersion':
         troposphere.GetAtt(pipeline_res, 'Version'),
     }
     webhook_resource = troposphere.codepipeline.Webhook.from_dict(
         logical_id,
         cfn_export_dict,
     )
     self.template.add_resource(webhook_resource)
Example #5
    def init_codebuild_permission(self, permission_config, assume_role_res):
        """CodeBuild Web Console Permissions"""
        if 'ManagedPolicyArns' not in assume_role_res.properties.keys():
            assume_role_res.properties['ManagedPolicyArns'] = []

        statement_list = []
        #readwrite_codebuild_arns = []
        readonly_codebuild_arns = []
        for resource in permission_config.resources:
            codebuild_ref = Reference(resource.codebuild)
            codebuild_account_ref = 'paco.ref ' + '.'.join(
                codebuild_ref.parts[:-2]) + '.configuration.account'
            codebuild_account_ref = self.paco_ctx.get_ref(
                codebuild_account_ref)
            codebuild_account_id = self.paco_ctx.get_ref(
                codebuild_account_ref + '.id')
            if codebuild_account_id != self.account_id:
                continue

            codebuild_arn = self.paco_ctx.get_ref(resource.codebuild +
                                                  '.project.arn')

            if resource.permission == 'ReadOnly':
                if codebuild_arn not in readonly_codebuild_arns:
                    readonly_codebuild_arns.append(codebuild_arn)

        self.set_codebuild_permissions(readonly_codebuild_arns,
                                       assume_role_res, 'CodeBuild')
Example #6
    def init_deploymentpipelines_permission(self, permission_config,
                                            assume_role_res):
        if 'ManagedPolicyArns' not in assume_role_res.properties.keys():
            assume_role_res.properties['ManagedPolicyArns'] = []

        pipeline_list = []
        for resource in permission_config.resources:
            pipeline_ref = Reference(resource.pipeline)
            pipeline = pipeline_ref.get_model_obj(self.paco_ctx.project)
            account_ref = pipeline.configuration.account
            account_name = self.paco_ctx.get_ref(account_ref + '.name')
            if account_name == self.account_ctx.name:
                pipeline_arn = self.paco_ctx.get_ref(pipeline.paco_ref +
                                                     '.arn')
                pipeline_list.append({
                    'permission': resource.permission,
                    'pipeline': pipeline,
                    'pipeline_arn': pipeline_arn
                })

        self.deployment_pipeline_manaul_approval_permissions(
            pipeline_list, assume_role_res)
        self.deployment_pipeline_codepipeline_permissions(
            pipeline_list, assume_role_res)
        self.deployment_pipeline_codebuild_permissions(pipeline_list,
                                                       assume_role_res)
Example #7
    def init_codebuild_permission(self, permission_config, assume_role_res):
        """CodeBuild Web Console Permissions"""
        if 'ManagedPolicyArns' not in assume_role_res.properties.keys():
            assume_role_res.properties['ManagedPolicyArns'] = []

        statement_list = []
        #readwrite_codebuild_arns = []
        readonly_codebuild_arns = []
        for resource in permission_config.resources:
            codebuild_ref = Reference(resource.codebuild)
            codebuild_account_ref = 'paco.ref ' + '.'.join(
                codebuild_ref.parts[:-2]) + '.configuration.account'
            codebuild_account_ref = self.paco_ctx.get_ref(
                codebuild_account_ref)
            codebuild_account_id = self.paco_ctx.get_ref(
                codebuild_account_ref + '.id')
            if codebuild_account_id != self.account_id:
                continue

            codebuild_arn = self.paco_ctx.get_ref(resource.codebuild +
                                                  '.project.arn')

            if resource.permission == 'ReadOnly':
                if codebuild_arn not in readonly_codebuild_arns:
                    readonly_codebuild_arns.append(codebuild_arn)

        readonly_codebuild_actions = [
            Action('codebuild', 'BatchGet*'),
            Action('codebuild', 'Get*'),
            Action('codebuild', 'List*'),
            Action('cloudwatch', 'GetMetricStatistics*'),
            Action('events', 'DescribeRule'),
            Action('events', 'ListTargetsByRule'),
            Action('events', 'ListRuleNamesByTarget'),
            Action('logs', 'GetLogEvents')
        ]
        if len(readonly_codebuild_arns) > 0:
            statement_list.append(
                Statement(
                    Sid='CodeBuildReadOnly',
                    Effect=Allow,
                    Action=readonly_codebuild_actions,
                    Resource=['*']  #readonly_codebuild_arns
                ))
            #statement_list.append(
            #    Statement(
            #        Sid='OtherReadOnly',
            #        Effect=Allow,
            #        Action=readonly_other_actions,
            #        Resource=['*']
            #    )
            #)

        managed_policy_res = troposphere.iam.ManagedPolicy(
            title=self.create_cfn_logical_id("CodeBuildPolicy"),
            PolicyDocument=PolicyDocument(Version="2012-10-17",
                                          Statement=statement_list),
            Roles=[troposphere.Ref(assume_role_res)])
        self.template.add_resource(managed_policy_res)  #
Example #8
    def build_sns_state(self, resource):
        state = {}
        for group_name in resource.keys():
            group = resource[group_name]
            state[group_name] = resolve_ref_outputs(
                Reference(group.paco_ref + '.arn'),
                self.paco_ctx.project['home'])

        return state
Example #9
    def init_systemsmanagersession_permission(self, permission_config,
                                              assume_role_res):
        if 'ManagedPolicyArns' not in assume_role_res.properties.keys():
            assume_role_res.properties['ManagedPolicyArns'] = []

        resource_group_condition_list = []
        for resource in permission_config.resources:
            resource_ref = Reference(resource)
            # Initialize the network environments that we need access to
            resource_obj = resource_ref.get_model_obj(self.paco_ctx.project)
            if schemas.IResourceGroup.providedBy(resource_obj):
                resource_group_condition_list.append(
                    StringLike({
                        'ssm:resourceTag/Paco-Application-Group-Name':
                        resource_obj.name
                    }))

        if len(resource_group_condition_list) == 0:
            return

        statement_list = []
        statement_list.append(
            Statement(
                Sid='SessionManagerStartSession',
                Effect=Allow,
                Action=[
                    Action('ssm', 'StartSession'),
                ],
                Resource=[
                    'arn:aws:ec2:*:*:instance/*',
                    'arn:aws:ssm:*::document/AWS-StartPortForwardingSession'
                ],
                Condition=Condition(resource_group_condition_list)))
        statement_list.append(
            Statement(
                Sid='SessionManagerPortForward',
                Effect=Allow,
                Action=[
                    Action('ssm', 'StartSession'),
                ],
                Resource=[
                    'arn:aws:ssm:*::document/AWS-StartPortForwardingSession'
                ]))
        statement_list.append(
            Statement(Sid='SessionManagerTerminateSession',
                      Effect=Allow,
                      Action=[
                          Action('ssm', 'TerminateSession'),
                          Action('ssm', 'ResumeSession'),
                      ],
                      Resource=['arn:aws:ssm:*:*:session/${aws:username}-*']))
        managed_policy_res = troposphere.iam.ManagedPolicy(
            title=self.create_cfn_logical_id_join(["SystemsManagerSession"]),
            PolicyDocument=PolicyDocument(Version="2012-10-17",
                                          Statement=statement_list),
            Roles=[troposphere.Ref(assume_role_res)])
        self.template.add_resource(managed_policy_res)
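
For orientation, the first statement built above renders (once the awacs objects are serialized) to roughly the IAM statement below when a single resource group named 'site' matched; it scopes ssm:StartSession to instances carrying the matching Paco application group tag. The group name is hypothetical.

# Approximate rendered form of the 'SessionManagerStartSession' statement,
# assuming one matched resource group named 'site'.
start_session_statement = {
    'Sid': 'SessionManagerStartSession',
    'Effect': 'Allow',
    'Action': ['ssm:StartSession'],
    'Resource': [
        'arn:aws:ec2:*:*:instance/*',
        'arn:aws:ssm:*::document/AWS-StartPortForwardingSession',
    ],
    'Condition': {
        'StringLike': {'ssm:resourceTag/Paco-Application-Group-Name': 'site'},
    },
}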
Example #10
 def init_codebuild_permission(self, permission_config, permissions_by_account):
     """
     Iterates over each codebuild reference and adds its permission config
     to the map of permissions by account.
     """
     for resource in permission_config.resources:
         codebuild_ref = Reference(resource.codebuild)
         account_ref = 'paco.ref ' + '.'.join(codebuild_ref.parts[:-2]) + '.configuration.account'
         account_ref = self.paco_ctx.get_ref(account_ref)
         account_name = self.paco_ctx.get_ref(account_ref + '.name')
         if permission_config not in permissions_by_account[account_name]:
             permissions_by_account[account_name].append(permission_config)
Example #11
    def __init__(self, name, parent, config_dict):
        super().__init__(name, parent)

        self.zones_by_account = {}
        if config_dict == None:
            return
        loader.apply_attributes_from_config(self, config_dict)

        for zone_id in self.hosted_zones.keys():
            hosted_zone = self.hosted_zones[zone_id]
            aws_account_ref = hosted_zone.account
            ref = Reference(aws_account_ref)
            account_name = ref.parts[1]
            if account_name not in self.zones_by_account:
                self.zones_by_account[account_name] = []
            self.zones_by_account[account_name].append(zone_id)
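
The loop above is a plain group-by: each hosted zone id is bucketed under the account name parsed from its account ref (parts[1] of a 'paco.ref accounts.<name>' string). The same shape in isolation, with hypothetical zone data:

# Hypothetical zone-to-account-ref data, illustrating only the grouping.
hosted_zones = {
    'example_com': 'paco.ref accounts.prod',
    'dev_example_com': 'paco.ref accounts.dev',
    'staging_example_com': 'paco.ref accounts.dev',
}

zones_by_account = {}
for zone_id, account_ref in hosted_zones.items():
    account_name = account_ref.split(' ')[1].split('.')[1]  # 'accounts.<name>' -> '<name>'
    zones_by_account.setdefault(account_name, []).append(zone_id)

# zones_by_account -> {'prod': ['example_com'], 'dev': ['dev_example_com', 'staging_example_com']}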
Example #12
    def get_account_context(self, account_ref=None, account_name=None, netenv_ref=None):
        if account_ref != None:
            ref = Reference(account_ref)
            account_name = ref.parts[1]
        elif netenv_ref != None:
            account_ref = netenv_ref.split(' ')[1]
            account_ref = 'paco.ref netenv.'+'.'.join(account_ref.split('.', 4)[:-1])+".network.aws_account"
            account_ref = self.get_ref(account_ref)
            return self.get_account_context(account_ref=account_ref)
        elif account_name == None:
            raise StackException(PacoErrorCode.Unknown, message = "get_account_context was only passed None: Not enough context to get account.")

        if account_name in self.accounts:
            return self.accounts[account_name]

        account_ctx = AccountContext(
            paco_ctx=self,
            name=account_name,
            mfa_account=self.master_account
        )
        self.accounts[account_name] = account_ctx

        return account_ctx
Example #13
 def create_github_source_properties(self, stage, action, info):
     github_access_token = Reference(action.github_access_token).ref
     github_owner_param = self.create_cfn_parameter(
         param_type='String',
         name=self.create_cfn_logical_id('GitHubOwner' + stage.name +
                                         action.name),
         description='The name of the GitHub owner',
         value=action.github_owner)
     github_repo_param = self.create_cfn_parameter(
         param_type='String',
         name=self.create_cfn_logical_id('GitHubRepository' + stage.name +
                                         action.name),
         description='The name of the GitHub Repository',
         value=action.github_repository)
     github_deploy_branch_name_param = self.create_cfn_parameter(
         param_type='String',
         name=self.create_cfn_logical_id('GitHubDeploymentBranchName' +
                                         stage.name + action.name),
         description=
         'The name of the branch where commits will trigger a build.',
         value=action.deployment_branch_name)
     output_artifact_name = '{}Artifact{}{}'.format(info['Name'],
                                                    stage.name, action.name)
     return {
         'Configuration': {
             'Owner': troposphere.Ref(github_owner_param),
             'Repo': troposphere.Ref(github_repo_param),
             'Branch': troposphere.Ref(github_deploy_branch_name_param),
             'OAuthToken':
             "{{resolve:secretsmanager:%s}}" % github_access_token,
             'PollForSourceChanges': action.poll_for_source_changes,
         },
         'OutputArtifacts': [
             troposphere.codepipeline.OutputArtifacts(
                 Name=output_artifact_name)
         ]
     }
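
The OAuthToken value is a CloudFormation dynamic reference: the GitHub token never appears in the template itself, and CloudFormation pulls it from Secrets Manager when the stack is created or updated. With a hypothetical secret name, the rendered string looks like this:

# Hypothetical Secrets Manager secret name, for illustration only.
github_access_token = 'mynet.dev.github.token'
oauth_token = "{{resolve:secretsmanager:%s}}" % github_access_token
# '{{resolve:secretsmanager:mynet.dev.github.token}}'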
Example #14
    def init_resource(self):
        self.stack_group.add_new_stack(
            self.aws_region,
            self.resource,
            paco.cftemplates.ApiGatewayRestApi,
            stack_tags=self.stack_tags,
        )

        # Stack for cross-account Lambda Permissions
        # (same-account Lambda Permissions are granted with an IAM Role in the ApiGatewayRestApi Stack)
        # This Stack has to be made after Lambda and after API Gateway in the target account as it depends upon both
        apigateway = self.resource
        for awslambda in self.paco_ctx.project.get_all_resources_by_type(
                'Lambda'):
            for method in apigateway.methods.values():
                if method.integration != None and method.integration.integration_lambda != None:
                    if awslambda.paco_ref == method.integration.integration_lambda:
                        if apigateway.get_account(
                        ).name != awslambda.get_account().name:
                            # parse the account and region from the awslambda ref
                            lambda_ref = Reference(awslambda.paco_ref)
                            account = lambda_ref.get_account(
                                self.paco_ctx.project, awslambda)
                            account_ctx = self.paco_ctx.get_account_context(
                                account_name=account.name)

                            # XXX FixMe: if more than one Lambda is in a given account/region, they will have the same Stack
                            # make the template grant permissions for all Lambdas
                            # create LambdaPermission Stack
                            self.stack_group.add_new_stack(
                                lambda_ref.region,
                                self.resource,
                                paco.cftemplates.ApiGatewayLamdaPermissions,
                                account_ctx=account_ctx,
                                stack_tags=self.stack_tags,
                                extra_context={'awslambda': awslambda},
                            )
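
The separate stack exists because invoke permission on a Lambda has to be granted in the account that owns the function; an IAM Role in the API Gateway's account cannot do that. As a rough sketch (not the actual ApiGatewayLamdaPermissions template), the cross-account stack boils down to an AWS::Lambda::Permission resource along these lines, with made-up names and ARNs:

import troposphere
import troposphere.awslambda

# Rough sketch only; the function name and REST API ARN below are made up.
sketch_template = troposphere.Template()
sketch_template.add_resource(
    troposphere.awslambda.Permission(
        'ApiGatewayInvokePermission',
        FunctionName='my-function-name',
        Action='lambda:InvokeFunction',
        Principal='apigateway.amazonaws.com',
        # Limit the grant to the REST API living in the other account.
        SourceArn='arn:aws:execute-api:us-west-2:123456789012:abcdef1234/*',
    )
)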
Example #15
    def ec2_nat_gateway(self, network_config, nat_sg_config, nat_sg_config_ref,
                        nat_config):

        nat_az = nat_config.availability_zone
        nat_segment = nat_config.segment.split('.')[-1]
        ec2_resource = {}
        for az_idx in range(1, network_config.availability_zones + 1):
            # Add security groups created for NAT Bastions
            nat_security_groups = []
            nat_security_groups.extend(nat_config.security_groups)
            if nat_az == 'all':
                nat_sg_id = nat_config.name + "_az" + str(az_idx)
                nat_security_groups.append('paco.ref ' + nat_sg_config_ref +
                                           '.' + nat_sg_id)
            elif az_idx == int(nat_config.availability_zone):
                for nat_sg_id in nat_sg_config.keys():
                    nat_security_groups.append('paco.ref ' +
                                               nat_sg_config_ref + '.' +
                                               nat_sg_id)

            if nat_az == 'all' or nat_az == str(az_idx):
                security_group_list_param = self.create_cfn_ref_list_param(
                    param_type='List<AWS::EC2::SecurityGroup::Id>',
                    name='NATSecurityGroupListAZ' + str(az_idx),
                    description=
                    'List of security group ids to attach to the instances.',
                    value=nat_security_groups,
                    ref_attribute='id',
                )

                subnet_id_param = self.create_cfn_parameter(
                    name=self.create_cfn_logical_id_join(
                        str_list=['SubnetIdAZ',
                                  str(az_idx), nat_segment],
                        camel_case=True),
                    param_type='String',
                    description='SubnetId to launch an EC2 NAT instance',
                    value=nat_config.segment + '.az' + str(az_idx) +
                    '.subnet_id',
                )
                ref_parts = nat_config.paco_ref_parts.split('.')
                instance_name = utils.big_join(str_list=[
                    ref_parts[1], ref_parts[2], 'NGW', nat_config.name,
                    'AZ' + str(az_idx)
                ],
                                               separator_ch='-',
                                               camel_case=True)
                # ToDo: expose the latest AMI id as an API and call it directly
                # SLOW: takes a couple of seconds to resolve this on every Paco run
                latest_image_ref = Reference(
                    'paco.ref function.aws.ec2.ami.latest.amazon-linux-nat')
                latest_image_ref.set_region(self.aws_region)
                nat_ami_id = latest_image_ref.resolve(self.paco_ctx.project,
                                                      self.account_ctx)
                ec2_resource[az_idx] = troposphere.ec2.Instance(
                    title=self.create_cfn_logical_id_join(
                        str_list=['EC2NATInstance',
                                  str(az_idx)],
                        camel_case=True),
                    template=self.template,
                    SubnetId=troposphere.Ref(subnet_id_param),
                    ImageId=nat_ami_id,
                    InstanceType=nat_config.ec2_instance_type,
                    KeyName=self.paco_ctx.get_ref(nat_config.ec2_key_pair +
                                                  '.keypair_name'),
                    SecurityGroupIds=troposphere.Ref(
                        security_group_list_param),
                    SourceDestCheck=False,
                    Tags=troposphere.ec2.Tags(Name=instance_name))

                ec2_instance_id_output = troposphere.Output(
                    title=ec2_resource[az_idx].title + 'Id',
                    Description="EC2 NAT Instance Id",
                    Value=troposphere.Ref(ec2_resource[az_idx]))
                self.template.add_output(ec2_instance_id_output)

                troposphere.ec2.EIP(title=self.create_cfn_logical_id_join(
                    str_list=['ElasticIP', str(az_idx)], camel_case=True),
                                    template=self.template,
                                    Domain='vpc',
                                    InstanceId=troposphere.Ref(
                                        ec2_resource[az_idx]))

                self.register_stack_output_config(
                    nat_config.paco_ref_parts + ".ec2.az" + str(az_idx),
                    ec2_instance_id_output.title)

        # Add DefaultRoute to the route tables in each AZ
        for segment_ref in nat_config.default_route_segments:
            segment_id = segment_ref.split('.')[-1]
            # Routes
            for az_idx in range(1, network_config.availability_zones + 1):
                if nat_config.availability_zone == 'all':
                    instance_id_ref = troposphere.Ref(ec2_resource[az_idx])
                else:
                    instance_id_ref = troposphere.Ref(
                        ec2_resource[int(nat_az)])

                route_table_id_param = self.create_cfn_parameter(
                    name=self.create_cfn_logical_id_join(
                        str_list=['RouteTable', segment_id, 'AZ',
                                  str(az_idx)],
                        camel_case=True),
                    param_type='String',
                    description='RouteTable ID for ' + segment_id + ' AZ' +
                    str(az_idx),
                    value=segment_ref + ".az{}.route_table.id".format(az_idx),
                )

                troposphere.ec2.Route(
                    title="EC2NATRouteAZ" + str(az_idx),
                    template=self.template,
                    DestinationCidrBlock="0.0.0.0/0",
                    InstanceId=instance_id_ref,
                    RouteTableId=troposphere.Ref(route_table_id_param))
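
The ToDo above notes that resolving 'paco.ref function.aws.ec2.ami.latest.amazon-linux-nat' is slow. One common way to do an equivalent lookup directly is to ask EC2 for the newest Amazon-owned amzn-ami-vpc-nat image; the boto3 sketch below is an assumption about the kind of query involved, not paco's actual implementation.

import boto3

def latest_amazon_linux_nat_ami(region_name):
    "Return the newest Amazon-owned amzn-ami-vpc-nat AMI id in a region (illustrative helper)."
    ec2 = boto3.client('ec2', region_name=region_name)
    response = ec2.describe_images(
        Owners=['amazon'],
        Filters=[
            {'Name': 'name', 'Values': ['amzn-ami-vpc-nat-*']},
            {'Name': 'state', 'Values': ['available']},
        ],
    )
    images = sorted(response['Images'], key=lambda image: image['CreationDate'])
    return images[-1]['ImageId']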
Example #16
    def __init__(self, paco_ctx, account_ctx, aws_region, stack_group,
                 stack_tags, env_ctx, app_id, grp_id, asg_id, asg_config,
                 asg_config_ref, role_profile_arn,
                 ec2_manager_user_data_script, ec2_manager_cache_id):
        self.env_ctx = env_ctx
        self.ec2_manager_cache_id = ec2_manager_cache_id
        segment_stack = self.env_ctx.get_segment_stack(asg_config.segment)

        # Super Init:
        super().__init__(paco_ctx,
                         account_ctx,
                         aws_region,
                         enabled=asg_config.is_enabled(),
                         config_ref=asg_config_ref,
                         stack_group=stack_group,
                         stack_tags=stack_tags,
                         change_protected=asg_config.change_protected)
        self.set_aws_name('ASG', grp_id, asg_id)
        self.asg_config = asg_config

        # Troposphere
        self.init_template('AutoScalingGroup: ' + self.ec2_manager_cache_id)
        template = self.template

        # InstanceAMI Parameter is preserved in disabled templates so it can be smoothly disabled/enabled
        if self.asg_config.instance_ami_ignore_changes:
            ignore_changes = True
        else:
            ignore_changes = False
        instance_ami_param = self.create_cfn_parameter(
            param_type='String',
            name='InstanceAMI',
            description='The Amazon Machine Image Id to launch instances with.',
            value=asg_config.instance_ami,
            ignore_changes=ignore_changes,
        )

        # if the network for the ASG is disabled, only use an empty placeholder
        env_region = get_parent_by_interface(asg_config,
                                             schemas.IEnvironmentRegion)
        if not env_region.network.is_enabled():
            self.set_template(template.to_yaml())
            return

        security_group_list_param = self.create_cfn_ref_list_param(
            param_type='List<AWS::EC2::SecurityGroup::Id>',
            name='SecurityGroupList',
            description=
            'List of security group ids to attach to the ASG instances.',
            value=asg_config.security_groups,
            ref_attribute='id',
        )
        instance_key_pair_param = self.create_cfn_parameter(
            param_type='String',
            name='InstanceKeyPair',
            description='The EC2 SSH KeyPair to assign each ASG instance.',
            value=asg_config.instance_key_pair + '.keypair_name',
        )
        launch_config_dict = {
            'AssociatePublicIpAddress': asg_config.associate_public_ip_address,
            'EbsOptimized': asg_config.ebs_optimized,
            'ImageId': troposphere.Ref(instance_ami_param),
            'InstanceMonitoring': asg_config.instance_monitoring,
            'InstanceType': asg_config.instance_type,
            'KeyName': troposphere.Ref(instance_key_pair_param),
            'SecurityGroups': troposphere.Ref(security_group_list_param),
        }

        # BlockDeviceMappings
        if len(asg_config.block_device_mappings) > 0:
            mappings = []
            for bdm in asg_config.block_device_mappings:
                mappings.append(bdm.cfn_export_dict)
            launch_config_dict["BlockDeviceMappings"] = mappings

        user_data_script = ''
        if ec2_manager_user_data_script != None:
            user_data_script += ec2_manager_user_data_script
        if asg_config.user_data_script != '':
            user_data_script += asg_config.user_data_script.replace(
                '#!/bin/bash', '')
        if user_data_script != '':
            user_data_64 = base64.b64encode(user_data_script.encode('ascii'))
            user_data_script_param = self.create_cfn_parameter(
                param_type='String',
                name='UserDataScript',
                description='User data script to run at instance launch.',
                value=user_data_64.decode('ascii'),
            )
            launch_config_dict['UserData'] = troposphere.Ref(
                user_data_script_param)

        if role_profile_arn != None:
            launch_config_dict['IamInstanceProfile'] = role_profile_arn

        # CloudFormation Init
        if asg_config.cfn_init and asg_config.is_enabled():
            launch_config_dict['Metadata'] = troposphere.autoscaling.Metadata(
                asg_config.cfn_init.export_as_troposphere())
            for key, value in asg_config.cfn_init.parameters.items():
                if type(value) == type(str()):
                    param_type = 'String'
                elif type(value) == type(int()) or type(value) == type(
                        float()):
                    param_type = 'Number'
                else:
                    raise UnsupportedCloudFormationParameterType(
                        "Can not cast {} of type {} to a CloudFormation Parameter type."
                        .format(value, type(value)))
                cfn_init_param = self.create_cfn_parameter(
                    param_type=param_type,
                    name=key,
                    description='CloudFormation Init Parameter {} for ASG {}'.
                    format(key, asg_config.name),
                    value=value,
                )

        # Launch Configuration resource
        launch_config_res = troposphere.autoscaling.LaunchConfiguration.from_dict(
            'LaunchConfiguration', launch_config_dict)
        template.add_resource(launch_config_res)

        subnet_list_ref = 'paco.ref {}'.format(
            segment_stack.template.config_ref)
        if asg_config.availability_zone == 'all':
            subnet_list_ref += '.subnet_id_list'
        else:
            subnet_list_ref += '.az{}.subnet_id'.format(
                asg_config.availability_zone)

        asg_subnet_list_param = self.create_cfn_parameter(
            param_type='List<AWS::EC2::Subnet::Id>',
            name='ASGSubnetList',
            description='A list of subnets where the ASG will launch instances',
            value=subnet_list_ref)

        min_instances = asg_config.min_instances if asg_config.is_enabled(
        ) else 0
        desired_capacity = asg_config.desired_capacity if asg_config.is_enabled(
        ) else 0
        desired_capacity_param = self.create_cfn_parameter(
            param_type='String',
            name='DesiredCapacity',
            description='The desired capacity of instances to run in the ASG.',
            value=desired_capacity,
            ignore_changes=self.asg_config.desired_capacity_ignore_changes,
        )
        asg_dict = {
            'AutoScalingGroupName': asg_config.get_aws_name(),
            'DesiredCapacity': troposphere.Ref(desired_capacity_param),
            'HealthCheckGracePeriod':
            asg_config.health_check_grace_period_secs,
            'LaunchConfigurationName': troposphere.Ref(launch_config_res),
            'MaxSize': asg_config.max_instances,
            'MinSize': min_instances,
            'Cooldown': asg_config.cooldown_secs,
            'HealthCheckType': asg_config.health_check_type,
            'TerminationPolicies': asg_config.termination_policies,
            'VPCZoneIdentifier': troposphere.Ref(asg_subnet_list_param),
        }

        if asg_config.load_balancers != None and len(
                asg_config.load_balancers) > 0:
            load_balancer_names_param = self.create_cfn_ref_list_param(
                param_type='List<String>',
                name='LoadBalancerNames',
                description=
                'A list of load balancer names to attach to the ASG',
                value=asg_config.load_balancers,
            )
            asg_dict['LoadBalancerNames'] = troposphere.Ref(
                load_balancer_names_param)

        if asg_config.is_enabled():
            if asg_config.target_groups != None and len(
                    asg_config.target_groups) > 0:
                asg_dict['TargetGroupARNs'] = []
                for target_group_arn in asg_config.target_groups:
                    target_group_arn_param = self.create_cfn_parameter(
                        param_type='String',
                        name='TargetGroupARNs' +
                        utils.md5sum(str_data=target_group_arn),
                        description='A Target Group ARN to attach to the ASG',
                        value=target_group_arn + '.arn',
                    )
                    asg_dict['TargetGroupARNs'].append(
                        troposphere.Ref(target_group_arn_param))


        if asg_config.monitoring != None and \
                asg_config.monitoring.is_enabled() == True and \
                len(asg_config.monitoring.asg_metrics) > 0:
            asg_dict['MetricsCollection'] = [{
                'Granularity':
                '1Minute',
                'Metrics':
                asg_config.monitoring.asg_metrics
            }]

        # ASG Tags
        asg_dict['Tags'] = [
            troposphere.autoscaling.Tag('Name',
                                        asg_dict['AutoScalingGroupName'], True)
        ]

        # EIP
        if asg_config.eip != None and asg_config.is_enabled():
            if references.is_ref(asg_config.eip) == True:
                eip_value = asg_config.eip + '.allocation_id'
            else:
                eip_value = asg_config.eip
            eip_id_param = self.create_cfn_parameter(
                param_type='String',
                name='EIPAllocationId',
                description=
                'The allocation Id of the EIP to attach to the instance.',
                value=eip_value,
            )
            asg_dict['Tags'].append(
                troposphere.autoscaling.Tag('Paco-EIP-Allocation-Id',
                                            troposphere.Ref(eip_id_param),
                                            True))

        # EFS FileSystemId Tags
        if asg_config.is_enabled():
            for efs_mount in asg_config.efs_mounts:
                target_hash = utils.md5sum(str_data=efs_mount.target)
                if references.is_ref(efs_mount.target) == True:
                    efs_value = efs_mount.target + '.id'
                else:
                    efs_value = efs_mount.target
                efs_id_param = self.create_cfn_parameter(
                    param_type='String',
                    name='EFSId' + target_hash,
                    description='EFS Id',
                    value=efs_value,
                )
                asg_tag = troposphere.autoscaling.Tag(
                    'efs-id-' + target_hash, troposphere.Ref(efs_id_param),
                    True)
                asg_dict['Tags'].append(asg_tag)

            # EBS Volume Id and Device name Tags
            for ebs_volume_mount in asg_config.ebs_volume_mounts:
                if ebs_volume_mount.is_enabled() == False:
                    continue
                volume_hash = utils.md5sum(str_data=ebs_volume_mount.volume)
                if references.is_ref(ebs_volume_mount.volume) == True:
                    ebs_volume_id_value = ebs_volume_mount.volume + '.id'
                else:
                    ebs_volume_id_value = ebs_volume_mount.volume
                # Volume Id
                ebs_volume_id_param = self.create_cfn_parameter(
                    param_type='String',
                    name='EBSVolumeId' + volume_hash,
                    description='EBS Volume Id',
                    value=ebs_volume_id_value)
                ebs_volume_id_tag = troposphere.autoscaling.Tag(
                    'ebs-volume-id-' + volume_hash,
                    troposphere.Ref(ebs_volume_id_param), True)
                asg_dict['Tags'].append(ebs_volume_id_tag)
                #ebs_device_param = self.create_cfn_parameter(
                #    param_type='String',
                #    name='EBSDevice'+volume_hash,
                #   description='EBS Device Name',
                #    value=ebs_volume_mount.device,
                #)
                #ebs_device_tag = troposphere.autoscaling.Tag(
                #    'ebs-device-' + volume_hash,
                #    troposphere.Ref(ebs_device_param),
                #    True
                #)
                #asg_dict['Tags'].append(ebs_device_tag)

        asg_res = troposphere.autoscaling.AutoScalingGroup.from_dict(
            'ASG', asg_dict)
        template.add_resource(asg_res)
        asg_res.DependsOn = launch_config_res
        max_batch_size = 1
        min_instances_in_service = 0
        pause_time = 'PT0S'
        wait_on_resource_signals = False
        if asg_config.is_enabled() == True:
            if asg_config.rolling_update_policy != None:
                if asg_config.rolling_update_policy.is_enabled():
                    max_batch_size = asg_config.rolling_update_policy.max_batch_size
                    min_instances_in_service = asg_config.rolling_update_policy.min_instances_in_service
                    pause_time = asg_config.rolling_update_policy.pause_time
                    wait_on_resource_signals = asg_config.rolling_update_policy.wait_on_resource_signals
            else:
                max_batch_size = asg_config.update_policy_max_batch_size
                min_instances_in_service = asg_config.update_policy_min_instances_in_service

        asg_res.UpdatePolicy = troposphere.policies.UpdatePolicy(
            AutoScalingRollingUpdate=troposphere.policies.
            AutoScalingRollingUpdate(
                MaxBatchSize=max_batch_size,
                MinInstancesInService=min_instances_in_service,
                PauseTime=pause_time,
                WaitOnResourceSignals=wait_on_resource_signals))

        self.create_output(title='ASGName',
                           value=troposphere.Ref(asg_res),
                           description='Auto Scaling Group Name',
                           ref=[asg_config_ref, asg_config_ref + '.name'])

        # CPU Scaling Policy
        if asg_config.scaling_policy_cpu_average > 0:
            troposphere.autoscaling.ScalingPolicy(
                title='CPUAverageScalingPolicy',
                template=template,
                AutoScalingGroupName=troposphere.Ref(asg_res),
                PolicyType='TargetTrackingScaling',
                TargetTrackingConfiguration=troposphere.autoscaling.
                TargetTrackingConfiguration(
                    PredefinedMetricSpecification=troposphere.autoscaling.
                    PredefinedMetricSpecification(
                        PredefinedMetricType='ASGAverageCPUUtilization'),
                    TargetValue=float(asg_config.scaling_policy_cpu_average)))

        if asg_config.scaling_policies != None:
            for scaling_policy_name in asg_config.scaling_policies.keys():
                scaling_policy = asg_config.scaling_policies[
                    scaling_policy_name]
                if scaling_policy.is_enabled() == False:
                    continue
                scaling_policy_res = troposphere.autoscaling.ScalingPolicy(
                    title=self.create_cfn_logical_id_join(
                        ['ScalingPolicy', scaling_policy_name],
                        camel_case=True),
                    template=template,
                    AdjustmentType=scaling_policy.adjustment_type,
                    AutoScalingGroupName=troposphere.Ref(asg_res),
                    PolicyType=scaling_policy.policy_type,
                    ScalingAdjustment=scaling_policy.scaling_adjustment,
                    Cooldown=scaling_policy.cooldown)
                alarm_idx = 0
                for alarm in scaling_policy.alarms:
                    dimension_list = []
                    for dimension in alarm.dimensions:
                        dimension_value = dimension.value
                        if dimension.name == 'AutoScalingGroupName' and references.is_ref(
                                dimension.value):
                            # Reference the local ASG if the ref points here
                            dimension_ref = Reference(dimension.value)
                            if dimension_ref.ref == self.config_ref:
                                dimension_value = troposphere.Ref(asg_res)
                        dimension_res = troposphere.cloudwatch.MetricDimension(
                            Name=dimension.name, Value=dimension_value)
                        dimension_list.append(dimension_res)

                    if len(dimension_list) == 0:
                        dimension_list = troposphere.Ref('AWS::NoValue')

                    # Alarm Resource
                    troposphere.cloudwatch.Alarm(
                        title=self.create_cfn_logical_id_join([
                            'ScalingPolicyAlarm', scaling_policy_name,
                            str(alarm_idx)
                        ],
                                                              camel_case=True),
                        template=template,
                        ActionsEnabled=True,
                        AlarmActions=[troposphere.Ref(scaling_policy_res)],
                        AlarmDescription=alarm.alarm_description,
                        ComparisonOperator=alarm.comparison_operator,
                        MetricName=alarm.metric_name,
                        Namespace=alarm.namespace,
                        Period=alarm.period,
                        Threshold=alarm.threshold,
                        EvaluationPeriods=alarm.evaluation_periods,
                        Statistic=alarm.statistic,
                        Dimensions=dimension_list)
                    alarm_idx += 1

        if asg_config.lifecycle_hooks != None:
            for lifecycle_hook_name in asg_config.lifecycle_hooks:
                lifecycle_hook = asg_config.lifecycle_hooks[
                    lifecycle_hook_name]
                if lifecycle_hook.is_enabled() == False:
                    continue
                troposphere.autoscaling.LifecycleHook(
                    title=self.create_cfn_logical_id_join(
                        ['LifecycleHook', lifecycle_hook_name],
                        camel_case=True),
                    template=template,
                    AutoScalingGroupName=troposphere.Ref(asg_res),
                    DefaultResult=lifecycle_hook.default_result,
                    LifecycleTransition=lifecycle_hook.lifecycle_transition,
                    RoleARN=lifecycle_hook.role_arn,
                    NotificationTargetARN=lifecycle_hook.
                    notification_target_arn)

        self.set_template()
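
One small detail from the template above: the merged user-data script is base64-encoded in Python and handed to CloudFormation as an ordinary String parameter, which the launch configuration then consumes through Ref as its UserData. A tiny round-trip sketch of that encoding step; the script body is made up.

import base64

user_data_script = "#!/bin/bash\nyum update -y\n"  # hypothetical script body
user_data_64 = base64.b64encode(user_data_script.encode('ascii')).decode('ascii')
assert base64.b64decode(user_data_64).decode('ascii') == user_data_script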
Example #17
    def __init__(
        self,
        stack,
        paco_ctx,
        role_profile_arn,
        ec2_manager_user_data_script,
        ec2_manager_cache_id
    ):
        self.asg_config = asg_config = stack.resource
        asg_config_ref = asg_config.paco_ref_parts
        self.ec2_manager_cache_id = ec2_manager_cache_id
        super().__init__(stack, paco_ctx, iam_capabilities=["CAPABILITY_NAMED_IAM"])
        self.set_aws_name('ASG', self.resource_group_name, self.resource_name)
        self.instance_iam_role_name = self.paco_ctx.get_ref(asg_config.paco_ref + '.instance_iam_role.name')

        # Troposphere
        self.init_template('AutoScalingGroup: ' + self.ec2_manager_cache_id)
        template = self.template

        if self.asg_config.is_enabled() == False:
            return

        # InstanceAMI Parameter is preserved in disabled templates so it can be smoothly disabled/enabled
        if self.asg_config.instance_ami_ignore_changes:
            ignore_changes = True
        else:
            ignore_changes = False
        instance_ami_param = self.create_cfn_parameter(
            param_type='String',
            name='InstanceAMI',
            description='The Amazon Machine Image Id to launch instances with.',
            value=asg_config.instance_ami,
            ignore_changes=ignore_changes,
        )

        # if the network for the ASG is disabled, only use an empty placeholder
        if not self.asg_config.env_region_obj.network.is_enabled():
            return

        security_group_list_param = self.create_cfn_ref_list_param(
            param_type='List<AWS::EC2::SecurityGroup::Id>',
            name='SecurityGroupList',
            description='List of security group ids to attach to the ASG instances.',
            value=asg_config.security_groups,
            ref_attribute='id',
        )
        launch_config_dict = {
            'AssociatePublicIpAddress': asg_config.associate_public_ip_address,
            'EbsOptimized': asg_config.ebs_optimized,
            'ImageId': troposphere.Ref(instance_ami_param),
            'InstanceMonitoring': asg_config.instance_monitoring,
            'InstanceType': asg_config.instance_type,
            'SecurityGroups': troposphere.Ref(security_group_list_param),
        }

        if asg_config.instance_key_pair != None:
            instance_key_pair_param = self.create_cfn_parameter(
                param_type='String',
                name='InstanceKeyPair',
                description='The EC2 SSH KeyPair to assign each ASG instance.',
                value=asg_config.instance_key_pair+'.keypair_name',
            )
            launch_config_dict['KeyName'] = troposphere.Ref(instance_key_pair_param)

        # BlockDeviceMappings
        if len(asg_config.block_device_mappings) > 0:
            mappings = []
            for bdm in asg_config.block_device_mappings:
                mappings.append(
                    bdm.cfn_export_dict
                )
            launch_config_dict["BlockDeviceMappings"] = mappings

        user_data_script = ''
        if ec2_manager_user_data_script != None:
            user_data_script += ec2_manager_user_data_script
        if asg_config.user_data_script != '':
            user_data_script += asg_config.user_data_script.replace('#!/bin/bash', '')
        if user_data_script != '':
            user_data_64 = base64.b64encode(user_data_script.encode('ascii'))
            user_data_script_param = self.create_cfn_parameter(
                param_type='String',
                name='UserDataScript',
                description='User data script to run at instance launch.',
                value=user_data_64.decode('ascii'),
            )
            launch_config_dict['UserData'] = troposphere.Ref(user_data_script_param)

        if role_profile_arn != None:
            launch_config_dict['IamInstanceProfile'] = role_profile_arn

        # CloudFormation Init
        if asg_config.cfn_init and asg_config.is_enabled():
            launch_config_dict['Metadata'] = troposphere.autoscaling.Metadata(
                asg_config.cfn_init.export_as_troposphere()
            )
            for key, value in asg_config.cfn_init.parameters.items():
                if type(value) == type(str()):
                    param_type = 'String'
                elif type(value) == type(int()) or type(value) == type(float()):
                    param_type = 'Number'
                else:
                    raise UnsupportedCloudFormationParameterType(
                        "Can not cast {} of type {} to a CloudFormation Parameter type.".format(
                            value, type(value)
                        )
                    )
                cfn_init_param = self.create_cfn_parameter(
                    param_type=param_type,
                    name=key,
                    description='CloudFormation Init Parameter {} for ASG {}'.format(key, asg_config.name),
                    value=value,
                )

        # Launch Configuration resource
        launch_config_res = troposphere.autoscaling.LaunchConfiguration.from_dict(
            'LaunchConfiguration',
            launch_config_dict
        )
        template.add_resource(launch_config_res)

        subnet_list_ref = asg_config.env_region_obj.network.vpc.segments[asg_config.segment].paco_ref
        if asg_config.availability_zone == 'all':
            subnet_list_ref += '.subnet_id_list'
        else:
            subnet_list_ref += '.az{}.subnet_id'.format(asg_config.availability_zone)


        asg_subnet_list_param = self.create_cfn_parameter(
            param_type='List<AWS::EC2::Subnet::Id>',
            name='ASGSubnetList',
            description='A list of subnets where the ASG will launch instances',
            value=subnet_list_ref
        )

        min_instances = asg_config.min_instances if asg_config.is_enabled() else 0
        desired_capacity = asg_config.desired_capacity if asg_config.is_enabled() else 0
        desired_capacity_param = self.create_cfn_parameter(
            param_type='String',
            name='DesiredCapacity',
            description='The desired capacity of instances to run in the ASG.',
            value=desired_capacity,
            ignore_changes=self.asg_config.desired_capacity_ignore_changes,
        )
        asg_dict = {
            'AutoScalingGroupName': asg_config.get_aws_name(),
            'DesiredCapacity': troposphere.Ref(desired_capacity_param),
            'HealthCheckGracePeriod': asg_config.health_check_grace_period_secs,
            'LaunchConfigurationName': troposphere.Ref(launch_config_res),
            'MaxSize': asg_config.max_instances,
            'MinSize': min_instances,
            'Cooldown': asg_config.cooldown_secs,
            'HealthCheckType': asg_config.health_check_type,
            'TerminationPolicies': asg_config.termination_policies,
            'VPCZoneIdentifier': troposphere.Ref(asg_subnet_list_param),
        }

        if asg_config.load_balancers != None and len(asg_config.load_balancers) > 0:
            load_balancer_names_param = self.create_cfn_ref_list_param(
                param_type='List<String>',
                name='LoadBalancerNames',
                description='A list of load balancer names to attach to the ASG',
                value=asg_config.load_balancers,
            )
            asg_dict['LoadBalancerNames'] = troposphere.Ref(load_balancer_names_param)

        if asg_config.is_enabled() and asg_config.disable_target_groups == False:
            if asg_config.target_groups != None and len(asg_config.target_groups) > 0:
                asg_dict['TargetGroupARNs'] = []
                for target_group_arn in asg_config.target_groups:
                    target_group_arn_param = self.create_cfn_parameter(
                        param_type='String',
                        name='TargetGroupARNs'+utils.md5sum(str_data=target_group_arn),
                        description='A Target Group ARN to attach to the ASG',
                        value=target_group_arn+'.arn',
                    )
                    asg_dict['TargetGroupARNs'].append(troposphere.Ref(target_group_arn_param))


        if asg_config.monitoring != None and \
                asg_config.monitoring.is_enabled() == True and \
                len(asg_config.monitoring.asg_metrics) > 0:
            asg_dict['MetricsCollection'] = [{
                'Granularity': '1Minute',
                'Metrics': asg_config.monitoring.asg_metrics
            }]

        # ASG Tags
        asg_dict['Tags'] = [
            troposphere.autoscaling.Tag('Name', asg_dict['AutoScalingGroupName'], True)
        ]

        # TODO: DNS: To be enabled once cftemplates/iam_managed_policies.py
        #            is ported to troposphere
        # if len(asg_config.dns) > 0 and asg_config.is_enabled():
        #     idx = 0
        #     for dns_config in asg_config.dns:
        #         if references.is_ref(dns_config.hosted_zone):
        #             hosted_zone_value = dns_config.hosted_zone+'.id'
        #         else:
        #             hosted_zone_value = dns_config.hosted_zone
        #         dns_hosted_zone_param = self.create_cfn_parameter(
        #             param_type='String',
        #             name=f'DNSHostedZone{idx}',
        #             description=f'DNS Hosted Zone for index {idx}',
        #             value=dns_value
        #         )
        #         asg_dict['Tags'].append(
        #             troposphere.autoscaling.Tag(f'Paco-DNS-Hosted-Zone-{idx}', troposphere.Ref(dns_hosted_zone_param), True)
        #         )
        #         dns_domain_param = self.create_cfn_parameter(
        #             param_type='String',
        #             name=f'DNSDomain{idx}',
        #             description=f'DNS Domain name for index {idx}',
        #             value=dns_value
        #         )
        #         asg_dict['Tags'].append(
        #             troposphere.autoscaling.Tag(f'Paco-DNS-Domain-{idx}', troposphere.Ref(dns_domain_param), True)
        #         )

        #         idx += 1

        # EIP
        if asg_config.eip != None and asg_config.is_enabled():
            if references.is_ref(asg_config.eip) == True:
                eip_value = asg_config.eip + '.allocation_id'
            else:
                eip_value = asg_config.eip
            eip_id_param = self.create_cfn_parameter(
                param_type='String',
                name='EIPAllocationId',
                description='The allocation Id of the EIP to attach to the instance.',
                value=eip_value,
            )
            asg_dict['Tags'].append(
                troposphere.autoscaling.Tag('Paco-EIP-Allocation-Id', troposphere.Ref(eip_id_param), True)
            )

        # EFS FileSystemId Tags
        if asg_config.is_enabled():
            for efs_mount in asg_config.efs_mounts:
                target_hash = utils.md5sum(str_data=efs_mount.target)
                if references.is_ref(efs_mount.target) == True:
                    efs_value = efs_mount.target + '.id'
                else:
                    efs_value = efs_mount.target
                efs_id_param = self.create_cfn_parameter(
                    param_type='String',
                    name='EFSId'+target_hash,
                    description='EFS Id',
                    value=efs_value,
                )
                asg_tag = troposphere.autoscaling.Tag(
                    'efs-id-' + target_hash,
                    troposphere.Ref(efs_id_param),
                    True
                )
                asg_dict['Tags'].append(asg_tag)

            # EBS Volume Id and Device name Tags
            for ebs_volume_mount in asg_config.ebs_volume_mounts:
                if ebs_volume_mount.is_enabled() == False:
                    continue
                volume_hash = utils.md5sum(str_data=ebs_volume_mount.volume)
                if references.is_ref(ebs_volume_mount.volume) == True:
                    ebs_volume_id_value = ebs_volume_mount.volume + '.id'
                else:
                    ebs_volume_id_value = ebs_volume_mount.volume
                # Volume Id
                ebs_volume_id_param = self.create_cfn_parameter(
                    param_type='String',
                    name='EBSVolumeId'+volume_hash,
                    description='EBS Volume Id',
                    value=ebs_volume_id_value
                )
                ebs_volume_id_tag = troposphere.autoscaling.Tag(
                    'ebs-volume-id-' + volume_hash,
                    troposphere.Ref(ebs_volume_id_param),
                    True
                )
                asg_dict['Tags'].append(ebs_volume_id_tag)
                #ebs_device_param = self.create_cfn_parameter(
                #    param_type='String',
                #    name='EBSDevice'+volume_hash,
                #   description='EBS Device Name',
                #    value=ebs_volume_mount.device,
                #)
                #ebs_device_tag = troposphere.autoscaling.Tag(
                #    'ebs-device-' + volume_hash,
                #    troposphere.Ref(ebs_device_param),
                #    True
                #)
                #asg_dict['Tags'].append(ebs_device_tag)

        # ECS Cluster Configuration
        if asg_config.is_enabled() and asg_config.ecs != None:
            ecs_cluster_name_param = self.create_cfn_parameter(
                param_type='String',
                name='ECSClusterName',
                description='ECS Cluster Name',
                value=asg_config.ecs.cluster + '.name'
            )
            asg_tag = troposphere.autoscaling.Tag(
                'Paco-ECSCluster-Name',
                troposphere.Ref(ecs_cluster_name_param),
                True
            )
            asg_dict['Tags'].append(asg_tag)
            # ECS Cluster Capacity Manager requires NewInstancesProtectedFromScaleIn to be enabled if it is going to manage instance protection
            if asg_config.ecs.capacity_provider != None and asg_config.ecs.capacity_provider.is_enabled():
                if asg_config.ecs.capacity_provider.managed_instance_protection == True:
                    asg_dict['NewInstancesProtectedFromScaleIn'] = True

        # ECS Release Phase Configuration
        policy_statements = []
        if asg_config.script_manager:
            if asg_config.script_manager.ecr_deploy:
                self.script_manager_ecr_deploy(asg_config.script_manager.ecr_deploy, asg_dict, asg_config, template)
            if asg_config.script_manager.ecs:
                self.script_manager_ecs(asg_config.script_manager.ecs, asg_dict, asg_config, template)
        # ECR Repository access
        self.set_ecr_repositories_statements(
            asg_config.ecr,
            template,
            'ECRAccess',
            [self.instance_iam_role_name]
        )
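        # Build the AutoScalingGroup resource from the accumulated property dict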
        asg_res = troposphere.autoscaling.AutoScalingGroup.from_dict(
            'ASG',
            asg_dict
        )
        template.add_resource(asg_res)
        asg_res.DependsOn = launch_config_res

        # only create an UpdatePolicy if it is enabled
        update_policy = asg_config.rolling_update_policy
        if update_policy.enabled == True:
            if update_policy.pause_time == '' and update_policy.wait_on_resource_signals == True:
                # if wait_on_resource_signals is true the default pause time is 5 minutes
                update_policy.pause_time = 'PT5M'
            elif update_policy.pause_time == '':
                update_policy.pause_time = 'PT0S'

            min_instances_in_service_param = self.create_cfn_parameter(
                param_type='String',
                name='MinInstancesInService',
                description='Rolling update minimum instances to remain in service during update.',
                value=update_policy.min_instances_in_service
            )

            # UpdatePolicy properties
            asg_res.UpdatePolicy = troposphere.policies.UpdatePolicy(
                AutoScalingRollingUpdate=troposphere.policies.AutoScalingRollingUpdate(
                    MaxBatchSize=update_policy.max_batch_size,
                    MinInstancesInService=troposphere.Ref(min_instances_in_service_param),
                    PauseTime=update_policy.pause_time,
                    WaitOnResourceSignals=update_policy.wait_on_resource_signals,
                    SuspendProcesses=[
                        'HealthCheck',
                        'ReplaceUnhealthy',
                        'AlarmNotification',
                        'ScheduledActions'
                    ]
                )
            )

        self.create_output(
            title='ASGName',
            value=troposphere.Ref(asg_res),
            description='Auto Scaling Group Name',
            ref=[asg_config_ref, asg_config_ref+'.name']
        )

        # CPU Scaling Policy
        if asg_config.scaling_policy_cpu_average > 0:
            troposphere.autoscaling.ScalingPolicy(
                title='CPUAverageScalingPolicy',
                template=template,
                AutoScalingGroupName=troposphere.Ref(asg_res),
                PolicyType='TargetTrackingScaling',
                TargetTrackingConfiguration=troposphere.autoscaling.TargetTrackingConfiguration(
                    PredefinedMetricSpecification=troposphere.autoscaling.PredefinedMetricSpecification(
                        PredefinedMetricType='ASGAverageCPUUtilization'
                    ),
                    TargetValue=float(asg_config.scaling_policy_cpu_average)
                )
            )

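        # Custom scaling policies, each wired to its own CloudWatch Alarm(s)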
        if asg_config.scaling_policies != None:
            for scaling_policy_name in asg_config.scaling_policies.keys():
                scaling_policy = asg_config.scaling_policies[scaling_policy_name]
                if scaling_policy.is_enabled() == False:
                    continue
                scaling_policy_res = troposphere.autoscaling.ScalingPolicy(
                    title=self.create_cfn_logical_id_join(
                        ['ScalingPolicy', scaling_policy_name],
                        camel_case=True
                    ),
                    template=template,
                    AdjustmentType=scaling_policy.adjustment_type,
                    AutoScalingGroupName=troposphere.Ref(asg_res),
                    PolicyType=scaling_policy.policy_type,
                    ScalingAdjustment=scaling_policy.scaling_adjustment,
                    Cooldown=scaling_policy.cooldown
                )
                alarm_idx = 0
                for alarm in scaling_policy.alarms:
                    dimension_list = []
                    for dimension in alarm.dimensions:
                        dimension_value = dimension.value
                        if dimension.name == 'AutoScalingGroupName' and references.is_ref(dimension.value):
                            # Reference the local ASG if the ref points here
                            dimension_ref = Reference(dimension.value)
                            if dimension_ref.ref == self.config_ref:
                                dimension_value = troposphere.Ref(asg_res)
                        dimension_res = troposphere.cloudwatch.MetricDimension(
                            Name=dimension.name,
                            Value=dimension_value
                        )
                        dimension_list.append(dimension_res)

                    if len(dimension_list) == 0:
                        dimension_list = troposphere.Ref('AWS::NoValue')

                    # Alarm Resource
                    troposphere.cloudwatch.Alarm(
                        title=self.create_cfn_logical_id_join(
                            ['ScalingPolicyAlarm', scaling_policy_name, str(alarm_idx)],
                            camel_case=True
                        ),
                        template=template,
                        ActionsEnabled=True,
                        AlarmActions=[troposphere.Ref(scaling_policy_res)],
                        AlarmDescription=alarm.alarm_description,
                        ComparisonOperator=alarm.comparison_operator,
                        MetricName=alarm.metric_name,
                        Namespace=alarm.namespace,
                        Period=alarm.period,
                        Threshold=alarm.threshold,
                        EvaluationPeriods=alarm.evaluation_periods,
                        Statistic=alarm.statistic,
                        Dimensions=dimension_list
                    )
                    alarm_idx += 1

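        # Lifecycle Hooks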
        if asg_config.lifecycle_hooks != None:
            for lifecycle_hook_name in asg_config.lifecycle_hooks:
                lifecycle_hook = asg_config.lifecycle_hooks[lifecycle_hook_name]
                if lifecycle_hook.is_enabled() == False:
                    continue
                troposphere.autoscaling.LifecycleHook(
                    title = self.create_cfn_logical_id_join(
                        ['LifecycleHook', lifecycle_hook_name],
                        camel_case=True
                    ),
                    template=template,
                    AutoScalingGroupName=troposphere.Ref(asg_res),
                    DefaultResult=lifecycle_hook.default_result,
                    LifecycleTransition=lifecycle_hook.lifecycle_transition,
                    RoleARN=lifecycle_hook.role_arn,
                    NotificationTargetARN=lifecycle_hook.notification_target_arn
                )

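        # SSM Patch Manager: associate the AWS-RunPatchBaseline document with instances targeted by their Name tag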
        if asg_config.patch_manager != None and asg_config.patch_manager.is_enabled():
            patch_ssm_associate_dict = {
                'AssociationName': f'OpusPatchBaseline{asg_config.patch_manager.operation}',
                'Name': 'AWS-RunPatchBaseline',
                'ScheduleExpression': asg_config.patch_manager.schedule_expression,
                'Targets': [{
                    'Key': 'tag:Name',
                    'Values': [asg_config.get_aws_name()]
                }],
                'Parameters': {
                    'Operation': [asg_config.patch_manager.operation]
                },
                'WaitForSuccessTimeoutSeconds': 900
            }
            patch_ssm_associate_res = troposphere.ssm.Association.from_dict(
                'PatchAssociation',
                patch_ssm_associate_dict
            )
            template.add_resource(patch_ssm_associate_res)
Example #18
0
    def __init__(self, stack, paco_ctx):
        super().__init__(
            stack,
            paco_ctx,
            iam_capabilities=["CAPABILITY_NAMED_IAM"],
        )
        eventsrule = stack.resource
        config_ref = eventsrule.paco_ref_parts
        self.set_aws_name('EventsRule', self.resource_group_name,
                          self.resource_name)

        self.notification_groups = {}

        # Init a Troposphere template
        self.init_template('CloudWatch EventsRule')

        if eventsrule.is_enabled() == False:
            return

        # Parameters
        schedule_expression_param = None
        if eventsrule.schedule_expression:
            schedule_expression_param = self.create_cfn_parameter(
                param_type='String',
                name='ScheduleExpression',
                description='ScheduleExpression for the Event Rule.',
                value=eventsrule.schedule_expression,
            )
        description_param = self.create_cfn_parameter(
            param_type='String',
            name='EventDescription',
            description='Description for the Event Rule.',
            value=eventsrule.description,
        )

        # Monitoring Target
        monitoring = self.resource.monitoring
        if monitoring != None and monitoring.is_enabled() == True:
            notifications = None
            if monitoring.notifications != None and len(
                    monitoring.notifications.keys()) > 0:
                notifications = monitoring.notifications
            else:
                app_config = get_parent_by_interface(self.resource,
                                                     schemas.IApplication)
                notifications = app_config.notifications

            if notifications != None and len(notifications.keys()) > 0:
                # Create the CF Param for the SNS ARN we need to Publish to
                notify_param_cache = []
                for notify_group_name in notifications.keys():
                    for sns_group_name in notifications[
                            notify_group_name].groups:
                        notify_param = self.create_notification_param(
                            sns_group_name)
                        # Only append if they are unique
                        if notify_param not in notify_param_cache:
                            eventsrule.targets.append(notify_param)
                            notify_param_cache.append(notify_param)

        # Targets
        targets = []
        self.target_params = {}
        target_invocation_role_resource = None
        for index in range(0, len(eventsrule.targets)):
            target = eventsrule.targets[index]
            # Target Parameters
            target_name = 'Target{}'.format(index)

            # Target CFN Parameters
            # Check if we already have a parameter object
            target_policy_actions = None
            if isinstance(target, troposphere.Parameter):
                self.target_params[target_name + 'Arn'] = target
            else:
                self.target_params[target_name +
                                   'Arn'] = self.create_cfn_parameter(
                                       param_type='String',
                                       name=target_name + 'Arn',
                                       description=target_name +
                                       ' Arn for the Events Rule.',
                                       value=target.target + '.arn',
                                   )

                # If the target is a reference, get the target object from the model
                # to check what type of resource we need to configure for
                target_ref = Reference(target.target)
                if target_ref.parts[-1] == 'project' and target_ref.parts[
                        -3] == 'build':
                    codebuild_target_ref = f'paco.ref {".".join(target_ref.parts[:-1])}'
                    target_model_obj = get_model_obj_from_ref(
                        codebuild_target_ref, self.paco_ctx.project)
                else:
                    target_model_obj = get_model_obj_from_ref(
                        target.target, self.paco_ctx.project)

                # Lambda Policy Actions
                if schemas.IDeploymentPipelineBuildCodeBuild.providedBy(
                        target_model_obj):
                    # CodeBuild Project
                    target_policy_actions = [awacs.codebuild.StartBuild]
                elif schemas.ILambda.providedBy(target_model_obj):
                    # Lambda Function
                    target_policy_actions = [awacs.awslambda.InvokeFunction]

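            # Target Id parameter: used as the 'Id' of this target on the Events Rule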
            self.target_params[target_name] = self.create_cfn_parameter(
                param_type='String',
                name=target_name,
                description=target_name + ' for the Event Rule.',
                value=target_name,
            )

            # IAM Role Policies by Resource type
            if target_policy_actions != None:
                # IAM Role Resources to allow Event to invoke Target
                target_invocation_role_resource = troposphere.iam.Role(
                    'TargetInvocationRole',
                    AssumeRolePolicyDocument=Policy(
                        Version='2012-10-17',
                        Statement=[
                            Statement(Effect=Allow,
                                      Action=[awacs.sts.AssumeRole],
                                      Principal=Principal(
                                          'Service', ['events.amazonaws.com']))
                        ],
                    ),
                    Policies=[
                        troposphere.iam.Policy(
                            PolicyName="TargetInvocation",
                            PolicyDocument=Policy(
                                Version='2012-10-17',
                                Statement=[
                                    Statement(
                                        Effect=Allow,
                                        Action=target_policy_actions,
                                        Resource=[
                                            troposphere.Ref(
                                                self.target_params[target_name
                                                                   + 'Arn'])
                                        ],
                                    )
                                ]))
                    ],
                )
                self.template.add_resource(target_invocation_role_resource)

            # Create Target CFN Resources
            cfn_export_dict = {
                'Arn':
                troposphere.Ref(self.target_params[target_name + 'Arn']),
                'Id': troposphere.Ref(self.target_params[target_name])
            }

            if target_invocation_role_resource != None:
                cfn_export_dict['RoleArn'] = troposphere.GetAtt(
                    target_invocation_role_resource, 'Arn')
            if hasattr(target, 'input_json') and target.input_json != None:
                cfn_export_dict['Input'] = target.input_json

            # Events Rule Targets
            targets.append(cfn_export_dict)

        # Events Rule Resource
        # The Name is needed so that a Lambda can be created and its Lambda ARN output
        # can be supplied as a Parameter to this Stack and a Lambda Permission can be
        # made with the Lambda. Avoids circular dependencies.
        name = create_event_rule_name(eventsrule)
        if eventsrule.enabled_state:
            enabled_state = 'ENABLED'
        else:
            enabled_state = 'DISABLED'

        events_rule_dict = {
            'Name': name,
            'Description': troposphere.Ref(description_param),
            'Targets': targets,
            'State': enabled_state
        }

        if target_invocation_role_resource != None:
            events_rule_dict['RoleArn'] = troposphere.GetAtt(
                target_invocation_role_resource, 'Arn')

        if schedule_expression_param != None:
            events_rule_dict['ScheduleExpression'] = troposphere.Ref(
                schedule_expression_param)
        elif eventsrule.event_pattern != None:
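            # Resolve event pattern sources: paco refs to CodeBuild projects become 'aws.codebuild' with a project-name detail filter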
            source_value_list = []
            project_name_list = []
            for pattern_source in eventsrule.event_pattern.source:
                if is_ref(pattern_source):
                    source_obj = get_model_obj_from_ref(
                        pattern_source, self.paco_ctx.project)
                    if schemas.IDeploymentPipelineBuildCodeBuild.providedBy(
                            source_obj):
                        source_value_list.append('aws.codebuild')
                        project_name_list.append(
                            source_obj._stack.template.get_project_name())
                    else:
                        raise InvalidEventsRuleEventPatternSource(
                            pattern_source)
                else:
                    source_value_list.append(pattern_source)

            if len(project_name_list) > 0:
                eventsrule.event_pattern.detail[
                    'project-name'] = project_name_list

            event_pattern_dict = {
                'source':
                source_value_list,
                'detail-type':
                utils.obj_to_dict(eventsrule.event_pattern.detail_type),
                'detail':
                utils.obj_to_dict(eventsrule.event_pattern.detail),
            }
            event_pattern_yaml = yaml.dump(event_pattern_dict)
            events_rule_dict['EventPattern'] = yaml.load(event_pattern_yaml)
        else:
            # Defaults to a CodePipeline events rule
            event_pattern_yaml = """
source:
    - aws.codepipeline
detail-type:
    - 'CodePipeline Pipeline Execution State Change'
detail:
    state:
    - STARTED
"""
            events_rule_dict['EventPattern'] = yaml.load(event_pattern_yaml)

        event_rule_resource = troposphere.events.Rule.from_dict(
            'EventRule', events_rule_dict)
        if target_invocation_role_resource != None:
            event_rule_resource.DependsOn = target_invocation_role_resource
        self.template.add_resource(event_rule_resource)

        # Outputs
        self.create_output(
            title="EventRuleId",
            value=troposphere.Ref(event_rule_resource),
            ref=config_ref + '.id',
        )
        self.create_output(
            title="EventRuleArn",
            value=troposphere.GetAtt(event_rule_resource, "Arn"),
            ref=config_ref + '.arn',
        )
Example #19
0
    def __init__(self, stack, paco_ctx):
        esdomain = stack.resource
        super().__init__(stack, paco_ctx)
        self.set_aws_name('ESDomain', self.resource_group_name,
                          self.resource_name)
        self.esdomain = esdomain
        self.init_template('Elasticsearch Domain')

        # if disabled then leave an empty placeholder and finish
        if not esdomain.is_enabled(): return

        # Parameters
        elasticsearch_version_param = self.create_cfn_parameter(
            name='ElasticsearchVersion',
            param_type='String',
            description='The version of Elasticsearch to use, such as 2.3.',
            value=self.esdomain.elasticsearch_version)

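        # Subnets: one subnet Parameter per Availability Zone the domain spans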
        if esdomain.segment != None:
            subnet_params = []
            segment_ref = esdomain.env_region_obj.network.vpc.segments[
                esdomain.segment].paco_ref
            if esdomain.cluster != None:
                if esdomain.cluster.zone_awareness_enabled:
                    azs = esdomain.cluster.zone_awareness_availability_zone_count
                else:
                    azs = 1
            else:
                azs = 2
            for az_idx in range(1, azs + 1):
                subnet_params.append(
                    self.create_cfn_parameter(
                        param_type='String',
                        name='ESDomainSubnet{}'.format(az_idx),
                        description='A subnet for the Elasticsearch Domain',
                        value='{}.az{}.subnet_id'.format(segment_ref, az_idx)))

        if esdomain.security_groups:
            sg_params = []
            vpc_sg_list = []
            for sg_ref in esdomain.security_groups:
                ref = Reference(sg_ref)
                sg_param_name = 'SecurityGroupId' + ref.parts[-2] + ref.parts[
                    -1]
                sg_param = self.create_cfn_parameter(
                    name=sg_param_name,
                    param_type='String',
                    description='Security Group Id',
                    value=sg_ref + '.id',
                )
                sg_params.append(sg_param)
                vpc_sg_list.append(troposphere.Ref(sg_param))

        # ElasticsearchDomain resource
        esdomain_logical_id = 'ElasticsearchDomain'
        cfn_export_dict = esdomain.cfn_export_dict
        if esdomain.access_policies_json != None:
            cfn_export_dict['AccessPolicies'] = json.loads(
                esdomain.access_policies_json)

        # ToDo: VPC currently fails as there needs to be a service-linked role for es.amazonaws.com
        # to allow it to create the ENI
        if esdomain.segment != None:
            cfn_export_dict['VPCOptions'] = {
                'SubnetIds':
                [troposphere.Ref(param) for param in subnet_params]
            }
            if esdomain.security_groups:
                cfn_export_dict['VPCOptions']['SecurityGroupIds'] = vpc_sg_list

        esdomain_resource = troposphere.elasticsearch.ElasticsearchDomain.from_dict(
            esdomain_logical_id,
            cfn_export_dict,
        )
        self.template.add_resource(esdomain_resource)

        # Outputs
        self.create_output(
            title='Name',
            value=troposphere.Ref(esdomain_resource),
            description='ElasticsearchDomain name',
            ref=esdomain.paco_ref_parts + '.name',
        )
        self.create_output(
            title='Arn',
            description='Arn of the domain. The same value as DomainArn.',
            value=troposphere.GetAtt(esdomain_resource, 'Arn'),
            ref=esdomain.paco_ref_parts + '.arn',
        )
        self.create_output(
            title='DomainArn',
            description='DomainArn of the domain. The same value as Arn.',
            value=troposphere.GetAtt(esdomain_resource, "DomainArn"),
            ref=esdomain.paco_ref_parts + '.domainarn',
        )
        self.create_output(
            title='DomainEndpoint',
            description=
            "The domain-specific endpoint that's used to submit index, search, and data upload requests to an Amazon ES domain.",
            value=troposphere.GetAtt(esdomain_resource, 'DomainEndpoint'),
            ref=esdomain.paco_ref_parts + '.domainendpoint',
        )
Example #20
0
    def __init__(self, stack, paco_ctx, env_ctx):
        elasticache_config = stack.resource
        config_ref = elasticache_config.paco_ref_parts
        super().__init__(stack, paco_ctx)
        self.set_aws_name('ElastiCache', self.resource_group_name,
                          self.resource.name, elasticache_config.engine)

        # Troposphere Template Generation
        self.init_template('ElastiCache: {} - {}'.format(
            elasticache_config.engine, elasticache_config.engine_version))

        # if disabled then leave an empty placeholder and finish
        if not elasticache_config.is_enabled(): return

        # Security Groups
        sg_params = []
        vpc_sg_list = []
        for sg_ref in elasticache_config.security_groups:
            ref = Reference(sg_ref)
            sg_param_name = self.create_cfn_logical_id('SecurityGroupId' +
                                                       ref.parts[-2] +
                                                       ref.parts[-1])
            sg_param = self.create_cfn_parameter(
                name=sg_param_name,
                param_type='String',
                description='VPC Security Group Id',
                value=sg_ref + '.id',
            )
            sg_params.append(sg_param)
            vpc_sg_list.append(troposphere.Ref(sg_param))

        # Subnet Ids
        subnet_ids_param = self.create_cfn_parameter(
            name='SubnetIdList',
            param_type='List<String>',
            description='List of Subnet Ids to provision ElastiCache nodes',
            value=elasticache_config.segment + '.subnet_id_list',
        )

        # ElastiCache Subnet Group
        subnet_group_dict = {
            'Description': troposphere.Ref('AWS::StackName'),
            'SubnetIds': troposphere.Ref(subnet_ids_param)
        }
        subnet_group_res = troposphere.elasticache.SubnetGroup.from_dict(
            'SubnetGroup', subnet_group_dict)
        self.template.add_resource(subnet_group_res)

        # ElastiCache Resource
        elasticache_dict = elasticache_config.cfn_export_dict
        elasticache_dict['SecurityGroupIds'] = vpc_sg_list
        elasticache_dict['CacheSubnetGroupName'] = troposphere.Ref(
            subnet_group_res)
        if elasticache_config.description:
            elasticache_dict[
                'ReplicationGroupDescription'] = elasticache_config.description
        else:
            elasticache_dict['ReplicationGroupDescription'] = troposphere.Ref(
                'AWS::StackName')

        cfn_cache_cluster_name = 'ReplicationGroup'
        cache_cluster_res = troposphere.elasticache.ReplicationGroup.from_dict(
            cfn_cache_cluster_name, elasticache_dict)
        self.template.add_resource(cache_cluster_res)

        # Outputs
        self.create_output(title='PrimaryEndPointAddress',
                           description='ElastiCache PrimaryEndpoint Address',
                           value=troposphere.GetAtt(cache_cluster_res,
                                                    'PrimaryEndPoint.Address'),
                           ref=config_ref + ".primaryendpoint.address")
        self.create_output(title='PrimaryEndPointPort',
                           description='ElastiCache PrimaryEndpoint Port',
                           value=troposphere.GetAtt(cache_cluster_res,
                                                    'PrimaryEndPoint.Port'),
                           ref=config_ref + ".primaryendpoint.port")
        self.create_output(title='ReadEndPointAddresses',
                           description='ElastiCache ReadEndpoint Addresses',
                           value=troposphere.GetAtt(cache_cluster_res,
                                                    'ReadEndPoint.Addresses'),
                           ref=config_ref + ".readendpoint.addresses")
        self.create_output(
            title='ReadEndPointPorts',
            description='ElastiCache ReadEndpoint Ports',
            value=troposphere.GetAtt(cache_cluster_res, 'ReadEndPoint.Ports'),
            ref=config_ref + ".readendpoint.ports",
        )

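        # DNS: add a CNAME record for each DNS entry, pointing at the primary endpoint address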
        route53_ctl = self.paco_ctx.get_controller('route53')
        for ec_dns in self.resource.dns:
            if self.resource.is_dns_enabled() == True:
                # alias_dns_ref = self.resource.paco_ref + '.dnsname'
                # alias_hosted_zone_ref = self.resource.paco_ref + '.canonicalhostedzoneid'
                hosted_zone = get_model_obj_from_ref(ec_dns.hosted_zone,
                                                     self.paco_ctx.project)
                account_ctx = self.paco_ctx.get_account_context(
                    account_ref=hosted_zone.account)
                route53_ctl.add_record_set(
                    account_ctx,
                    self.aws_region,
                    self.resource,
                    enabled=self.resource.is_enabled(),
                    dns=ec_dns,
                    record_set_type='CNAME',
                    resource_records=[
                        f'{self.resource.paco_ref}.primaryendpoint.address'
                    ],
                    stack_group=self.stack.stack_group,
                    async_stack_provision=True,
                    config_ref=self.resource.paco_ref_parts + '.dns')
Example #21
0
    def __init__(self, stack, paco_ctx):
        cip = stack.resource
        super().__init__(stack, paco_ctx, iam_capabilities=["CAPABILITY_IAM"])
        self.set_aws_name('CIP', self.resource_group_name, self.resource.name)

        self.init_template('Cognito Identity Pool')
        if not cip.is_enabled():
            return

        # Cognito Identity Pool
        cfn_export_dict = cip.cfn_export_dict
        if len(cip.identity_providers) > 0:
            idps = []
            up_client_params = {}
            up_params = {}
            for idp in cip.identity_providers:
                # replace <region> and <account> for refs in Services
                up_client_ref = Reference(idp.userpool_client)
                up_client_ref.set_account_name(self.account_ctx.get_name())
                up_client_ref.set_region(self.aws_region)
                userpool_client = up_client_ref.get_model_obj(self.paco_ctx.project)
                if up_client_ref.ref not in up_client_params:
                    up_client_name = self.create_cfn_logical_id(f'UserPoolClient{userpool_client.name}' + md5sum(str_data=up_client_ref.ref))
                    value = f'paco.ref {up_client_ref.ref}.id'
                    up_client_params[up_client_ref.ref] = self.create_cfn_parameter(
                        param_type='String',
                        name=up_client_name,
                        description=f'UserPool Client Id for {userpool_client.name}',
                        value=value,
                    )
                userpool = get_parent_by_interface(userpool_client, ICognitoUserPool)
                userpool_ref = userpool.paco_ref
                if userpool_ref not in up_params:
                    up_name = self.create_cfn_logical_id(f'UserPool{userpool.name}' + md5sum(str_data=userpool_ref))
                    up_params[userpool_ref] = self.create_cfn_parameter(
                        param_type='String',
                        name=up_name,
                        description=f'UserPool ProviderName for {userpool.name}',
                        value=userpool_ref + '.providername',
                    )
                idps.append({
                    "ClientId" : troposphere.Ref(up_client_params[up_client_ref.ref]),
                    "ProviderName" : troposphere.Ref(up_params[userpool_ref]),
                    "ServerSideTokenCheck" : idp.serverside_token_check,
                })
            cfn_export_dict['CognitoIdentityProviders'] = idps
        cip_resource = troposphere.cognito.IdentityPool.from_dict(
            'CognitoIdentityPool',
            cfn_export_dict
        )
        self.template.add_resource(cip_resource)

        # Outputs
        self.create_output(
            title=cip_resource.title + 'Id',
            description="Cognito Identity Pool Id",
            value=troposphere.Ref(cip_resource),
            ref=[cip.paco_ref_parts, cip.paco_ref_parts + ".id"],
        )

        # Roles
        roles_dict = {}

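        # Trust policy: allow unauthenticated Cognito federated identities to assume the role via sts:AssumeRoleWithWebIdentity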
        unauthenticated_assume_role_policy = PolicyDocument(
            Statement=[
                Statement(
                    Effect=Allow,
                    Principal=Principal('Federated',"cognito-identity.amazonaws.com"),
                    Action=[Action('sts', 'AssumeRoleWithWebIdentity')],
                    Condition=Condition([
                        StringEquals({"cognito-identity.amazonaws.com:aud": troposphere.Ref(cip_resource)}),
                        ForAnyValueStringLike({"cognito-identity.amazonaws.com:amr": "unauthenticated"})
                    ]),
                ),
            ],
        )
        unauthenticated_role_resource = role_to_troposphere(
            cip.unauthenticated_role,
            'UnauthenticatedRole',
            assume_role_policy=unauthenticated_assume_role_policy,
        )
        if unauthenticated_role_resource != None:
            self.template.add_resource(unauthenticated_role_resource)
            roles_dict['unauthenticated'] = troposphere.GetAtt(unauthenticated_role_resource, "Arn")

        authenticated_assume_role_policy = PolicyDocument(
            Statement=[
                Statement(
                    Effect=Allow,
                    Principal=Principal('Federated',"cognito-identity.amazonaws.com"),
                    Action=[Action('sts', 'AssumeRoleWithWebIdentity')],
                    Condition=Condition([
                        StringEquals({"cognito-identity.amazonaws.com:aud": troposphere.Ref(cip_resource)}),
                        ForAnyValueStringLike({"cognito-identity.amazonaws.com:amr": "authenticated"})
                    ]),
                ),
            ],
        )
        authenticated_role_resource = role_to_troposphere(
            cip.authenticated_role,
            'AuthenticatedRole',
            assume_role_policy=authenticated_assume_role_policy
        )
        if authenticated_role_resource != None:
            self.template.add_resource(authenticated_role_resource)
            roles_dict['authenticated'] = troposphere.GetAtt(authenticated_role_resource, "Arn")

        # Identity Pool Role Attachment
        if roles_dict:
            iproleattachment_resource = troposphere.cognito.IdentityPoolRoleAttachment(
                title='IdentityPoolRoleAttachment',
                IdentityPoolId=troposphere.Ref(cip_resource),
                Roles=roles_dict,
            )
            self.template.add_resource(iproleattachment_resource)
Example #22
0
    def __init__(
        self,
        paco_ctx,
        account_ctx,
        aws_region,
        stack_group,
        stack_tags,
        app_id,
        grp_id,
        res_id,
        elasticache_config,
        config_ref=None
    ):
        super().__init__(
            paco_ctx,
            account_ctx,
            aws_region,
            enabled=elasticache_config.is_enabled(),
            config_ref=config_ref,
            stack_group=stack_group,
            stack_tags=stack_tags,
            change_protected=elasticache_config.change_protected
        )
        self.set_aws_name('ElastiCache', grp_id, res_id, elasticache_config.engine )

        # Troposphere Template Generation
        self.init_template('ElastiCache: {} - {}'.format(
            elasticache_config.engine,
            elasticache_config.engine_version
        ))

        # if disabled then leave an empty placeholder and finish
        if not elasticache_config.is_enabled():
            return self.set_template()


        # Security Groups
        sg_params = []
        vpc_sg_list = []
        for sg_ref in elasticache_config.security_groups:
            ref = Reference(sg_ref)
            sg_param_name = 'SecurityGroupId'+ref.parts[-2]+ref.parts[-1]
            sg_param = self.create_cfn_parameter(
                name=sg_param_name,
                param_type='String',
                description='VPC Security Group Id',
                value=sg_ref + '.id',
            )
            sg_params.append(sg_param)
            vpc_sg_list.append(troposphere.Ref(sg_param))

        # Subnet Ids
        subnet_ids_param = self.create_cfn_parameter(
            name='SubnetIdList',
            param_type='List<String>',
            description='List of Subnet Ids to provision ElastiCache nodes',
            value=elasticache_config.segment+'.subnet_id_list',
        )

        # ElastiCache Subnet Group
        subnet_group_dict = {
            'Description': troposphere.Ref('AWS::StackName'),
            'SubnetIds' : troposphere.Ref(subnet_ids_param)
        }
        subnet_group_res = troposphere.elasticache.SubnetGroup.from_dict(
            'SubnetGroup',
            subnet_group_dict
        )
        self.template.add_resource(subnet_group_res)

        # ElastiCache Resource
        elasticache_dict = elasticache_config.cfn_export_dict
        elasticache_dict['SecurityGroupIds'] = vpc_sg_list
        elasticache_dict['CacheSubnetGroupName'] = troposphere.Ref(subnet_group_res)
        if elasticache_config.description:
            elasticache_dict['ReplicationGroupDescription'] = elasticache_config.description
        else:
            elasticache_dict['ReplicationGroupDescription'] = troposphere.Ref('AWS::StackName')

        cfn_cache_cluster_name = 'ReplicationGroup'
        cache_cluster_res = troposphere.elasticache.ReplicationGroup.from_dict(
            cfn_cache_cluster_name,
            elasticache_dict
        )
        self.template.add_resource(cache_cluster_res)

        # Outputs
        self.create_output(
            title='PrimaryEndPointAddress',
            description='ElastiCache PrimaryEndpoint Address',
            value=troposphere.GetAtt(cache_cluster_res, 'PrimaryEndPoint.Address'),
            ref=config_ref + ".primaryendpoint.address"
        )
        self.create_output(
            title='PrimaryEndPointPort',
            description='ElastiCache PrimaryEndpoint Port',
            value=troposphere.GetAtt(cache_cluster_res, 'PrimaryEndPoint.Port'),
            ref=config_ref + ".primaryendpoint.port"
        )
        self.create_output(
            title='ReadEndPointAddresses',
            description='ElastiCache ReadEndpoint Addresses',
            value=troposphere.GetAtt(cache_cluster_res, 'ReadEndPoint.Addresses'),
            ref=config_ref + ".readendpoint.addresses"
        )
        self.create_output(
            title='ReadEndPointPorts',
            description='ElastiCache ReadEndpoint Ports',
            value=troposphere.GetAtt(cache_cluster_res, 'ReadEndPoint.Ports'),
            ref=config_ref + ".readendpoint.ports",
        )

        self.set_template()
Example #23
0
    def __init__(
        self,
        stack,
        paco_ctx,
    ):
        super().__init__(
            stack,
            paco_ctx,
            iam_capabilities=["CAPABILITY_NAMED_IAM"],
        )
        account_ctx = stack.account_ctx
        aws_region = stack.aws_region
        self.set_aws_name('Lambda', self.resource_group_name, self.resource_name)
        awslambda = self.awslambda = self.stack.resource
        self.init_template('Lambda Function')

        # if not enabled finish with only empty placeholder
        if not awslambda.is_enabled(): return

        # Parameters
        sdb_cache_param = self.create_cfn_parameter(
            name='EnableSDBCache',
            param_type='String',
            description='Boolean indicating whether an SDB Domain will be created to be used as a cache.',
            value=awslambda.sdb_cache
        )
        function_description_param = self.create_cfn_parameter(
            name='FunctionDescription',
            param_type='String',
            description='A description of the Lambda Function.',
            value=awslambda.description
        )
        handler_param = self.create_cfn_parameter(
            name='Handler',
            param_type='String',
            description='The name of the function to call upon execution.',
            value=awslambda.handler
        )
        runtime_param = self.create_cfn_parameter(
            name='Runtime',
            param_type='String',
            description='The name of the runtime language.',
            value=awslambda.runtime
        )
        role_arn_param = self.create_cfn_parameter(
            name='RoleArn',
            param_type='String',
            description='The execution role for the Lambda Function.',
            value=awslambda.iam_role.get_arn()
        )
        role_name_param = self.create_cfn_parameter(
            name='RoleName',
            param_type='String',
            description='The execution role name for the Lambda Function.',
            value=awslambda.iam_role.resolve_ref_obj.role_name
        )
        memory_size_param = self.create_cfn_parameter(
            name='MemorySize',
            param_type='Number',
            description="The amount of memory that your function has access to. Increasing the function's" + \
            " memory also increases its CPU allocation. The default value is 128 MB. The value must be a multiple of 64 MB.",
            value=awslambda.memory_size
        )
        reserved_conc_exec_param = self.create_cfn_parameter(
            name='ReservedConcurrentExecutions',
            param_type='Number',
            description='The number of simultaneous executions to reserve for the function.',
            value=awslambda.reserved_concurrent_executions
        )
        timeout_param = self.create_cfn_parameter(
            name='Timeout',
            param_type='Number',
            description='The amount of time that Lambda allows a function to run before stopping it. ',
            value=awslambda.timeout
        )
        layers_param = self.create_cfn_parameter(
            name='Layers',
            param_type='CommaDelimitedList',
            description='List of up to 5 Lambda Layer ARNs.',
            value=','.join(awslambda.layers)
        )

        # create the Lambda resource
        cfn_export_dict = {
            'Description': troposphere.Ref(function_description_param),
            'Handler': troposphere.Ref(handler_param),
            'MemorySize': troposphere.Ref(memory_size_param),
            'Runtime': troposphere.Ref(runtime_param),
            'Role': troposphere.Ref(role_arn_param),
            'Timeout': troposphere.Ref(timeout_param),
        }
        if awslambda.reserved_concurrent_executions:
            cfn_export_dict['ReservedConcurrentExecutions'] = troposphere.Ref(reserved_conc_exec_param)

        if len(awslambda.layers) > 0:
            cfn_export_dict['Layers'] = troposphere.Ref(layers_param)

        # Lambda VPC
        if awslambda.vpc_config != None:
            vpc_security_group = self.create_cfn_ref_list_param(
                name='VpcSecurityGroupIdList',
                param_type='List<AWS::EC2::SecurityGroup::Id>',
                description='VPC Security Group Id List',
                value=awslambda.vpc_config.security_groups,
                ref_attribute='id',
            )
            # Segment SubnetList is a Segment stack Output based on availability zones
            segment_ref = awslambda.vpc_config.segments[0] + '.subnet_id_list'
            subnet_list_param = self.create_cfn_parameter(
                name='VpcSubnetIdList',
                param_type='List<AWS::EC2::Subnet::Id>',
                description='VPC Subnet Id List',
                value=segment_ref
            )
            cfn_export_dict['VpcConfig'] = {
                'SecurityGroupIds': troposphere.Ref(vpc_security_group),
                'SubnetIds': troposphere.Ref(subnet_list_param),
            }

        # Code object: S3 Bucket, inline ZipFile or deploy artifact?
        if awslambda.code.s3_bucket:
            if awslambda.code.s3_bucket.startswith('paco.ref '):
                value = awslambda.code.s3_bucket + ".name"
            else:
                value = awslambda.code.s3_bucket
            s3bucket_param = self.create_cfn_parameter(
                name='CodeS3Bucket',
                description="An Amazon S3 bucket in the same AWS Region as your function. The bucket can be in a different AWS account.",
                param_type='String',
                value=value
            )
            s3key_param = self.create_cfn_parameter(
                name='CodeS3Key',
                description="The Amazon S3 key of the deployment package.",
                param_type='String',
                value=awslambda.code.s3_key
            )
            cfn_export_dict['Code'] = {
                'S3Bucket': troposphere.Ref(s3bucket_param),
                'S3Key': troposphere.Ref(s3key_param),
            }
        else:
            zip_path = Path(awslambda.code.zipfile)
            if zip_path.is_file():
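                # Inline code: CloudFormation limits an inline ZipFile body to 4096 characters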
                cfn_export_dict['Code'] = {
                    'ZipFile': zip_path.read_text()
                }
            elif zip_path.is_dir():
                # get S3Bucket/S3Key or if it does not exist, it will create the bucket and artifact
                # and then upload the artifact
                bucket_name, artifact_name = init_lambda_code(
                    self.paco_ctx.paco_buckets,
                    self.stack.resource,
                    awslambda.code.zipfile,
                    self.stack.account_ctx,
                    self.stack.aws_region,
                )
                s3bucket_param = self.create_cfn_parameter(
                    name='CodeS3Bucket',
                    description="The Paco S3 Bucket for configuration",
                    param_type='String',
                    value=bucket_name
                )
                s3key_param = self.create_cfn_parameter(
                    name='CodeS3Key',
                    description="The Lambda code artifact S3 Key.",
                    param_type='String',
                    value=artifact_name
                )
                cfn_export_dict['Code'] = {
                    'S3Bucket': troposphere.Ref(s3bucket_param),
                    'S3Key': troposphere.Ref(s3key_param),
                }

        # Environment variables
        var_export = {}
        if awslambda.environment != None and awslambda.environment.variables != None:
            for var in awslambda.environment.variables:
                name = var.key.replace('_','')
                env_param = self.create_cfn_parameter(
                    name='EnvVar{}'.format(name),
                    param_type='String',
                    description='Env var for {}'.format(name),
                    value=var.value,
                )
                var_export[var.key] = troposphere.Ref(env_param)
            if awslambda.sdb_cache == True:
                var_export['SDB_CACHE_DOMAIN'] = troposphere.Ref('LambdaSDBCacheDomain')
            if len(awslambda.log_group_names) > 0:
                # Add PACO_LOG_GROUPS Environment Variable
                paco_log_groups = [
                    prefixed_name(awslambda, loggroup_name, self.paco_ctx.legacy_flag)
                    for loggroup_name in awslambda.log_group_names
                ]
                paco_log_groups_param = self.create_cfn_parameter(
                    name='EnvVariablePacoLogGroups',
                    param_type='String',
                    description='Env var for Paco Log Groups',
                    value=','.join(paco_log_groups),
                )
                var_export['PACO_LOG_GROUPS'] = troposphere.Ref(paco_log_groups_param)
        cfn_export_dict['Environment'] = { 'Variables': var_export }

        # Lambda resource
        self.awslambda_resource = troposphere.awslambda.Function.from_dict(
            'Function',
            cfn_export_dict
        )
        self.template.add_resource(self.awslambda_resource)

        # SDB Cache with SDB Domain and SDB Domain Policy resources
        if awslambda.sdb_cache == True:
            sdb_domain_resource = troposphere.sdb.Domain(
                title='LambdaSDBCacheDomain',
                template=self.template,
                Description="Lambda Function Domain"
            )
            sdb_policy = troposphere.iam.Policy(
                title='LambdaSDBCacheDomainPolicy',
                template=self.template,
                PolicyName='SDBDomain',
                PolicyDocument=Policy(
                    Version='2012-10-17',
                    Statement=[
                        Statement(
                            Effect=Allow,
                            Action=[Action("sdb","*")],
                            Resource=[
                                troposphere.Sub(
                                    'arn:aws:sdb:${AWS::Region}:${AWS::AccountId}:domain/${DomainName}',
                                    DomainName=troposphere.Ref('LambdaSDBCacheDomain')
                                )
                            ],
                        )
                    ],
                ),
                Roles=[troposphere.Ref(role_name_param)],
            )
            sdb_policy.DependsOn = sdb_domain_resource
            self.awslambda_resource.DependsOn = sdb_domain_resource

        # Permissions
        # SNS Topic Lambda permissions and subscription
        idx = 1
        for sns_topic_ref in awslambda.sns_topics:
            # SNS Topic Arn parameters
            param_name = 'SNSTopicArn%d' % idx
            self.create_cfn_parameter(
                name=param_name,
                param_type='String',
                description='An SNS Topic ARN to grant permission to.',
                value=sns_topic_ref + '.arn'
            )

            # Lambda permission
            troposphere.awslambda.Permission(
                title=param_name + 'Permission',
                template=self.template,
                Action="lambda:InvokeFunction",
                FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                Principal='sns.amazonaws.com',
                SourceArn=troposphere.Ref(param_name),
            )

            # SNS Topic subscription
            sns_topic = get_model_obj_from_ref(sns_topic_ref, self.paco_ctx.project)
            troposphere.sns.SubscriptionResource(
                title=param_name + 'Subscription',
                template=self.template,
                Endpoint=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                Protocol='lambda',
                TopicArn=troposphere.Ref(param_name),
                Region=sns_topic.region_name
            )
            idx += 1


        # Lambda permissions for connected Paco resources

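        # Walk every resource in the parent Application and grant lambda:InvokeFunction to the ones that reference this Lambda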
        app = get_parent_by_interface(awslambda, schemas.IApplication)
        for obj in get_all_nodes(app):
            # S3 Bucket notification permission(s)
            if schemas.IS3Bucket.providedBy(obj):
                seen = {}
                if hasattr(obj, 'notifications'):
                    if hasattr(obj.notifications, 'lambdas'):
                        for lambda_notif in obj.notifications.lambdas:
                            if lambda_notif.function == awslambda.paco_ref:
                                # yes, this Lambda gets notification from this S3Bucket
                                group = get_parent_by_interface(obj, schemas.IResourceGroup)
                                s3_logical_name = self.gen_cf_logical_name(group.name + obj.name, '_')
                                if s3_logical_name not in seen:
                                    troposphere.awslambda.Permission(
                                        title='S3Bucket' + s3_logical_name,
                                        template=self.template,
                                        Action="lambda:InvokeFunction",
                                        FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                                        Principal='s3.amazonaws.com',
                                        SourceArn='arn:aws:s3:::' + obj.get_bucket_name(),
                                    )
                                    seen[s3_logical_name] = True

            # Events Rule permission(s)
            if schemas.IEventsRule.providedBy(obj):
                seen = {}
                for target in obj.targets:
                    target_ref = Reference(target.target)
                    target_ref.set_account_name(account_ctx.get_name())
                    target_ref.set_region(aws_region)
                    lambda_ref = Reference(awslambda.paco_ref)

                    if target_ref.raw == lambda_ref.raw:
                        # yes, the Events Rule has a Target that is this Lambda
                        group = get_parent_by_interface(obj, schemas.IResourceGroup)
                        eventsrule_logical_name = self.gen_cf_logical_name(group.name + obj.name, '_')
                        if eventsrule_logical_name not in seen:
                            rule_name = create_event_rule_name(obj)
                            # rule_name = self.create_cfn_logical_id("EventsRule" + obj.paco_ref)
                            # rule_name = hash_smaller(rule_name, 64)
                            source_arn = 'arn:aws:events:{}:{}:rule/{}'.format(
                                aws_region,
                                account_ctx.id,
                                rule_name
                            )
                            troposphere.awslambda.Permission(
                                title='EventsRule' + eventsrule_logical_name,
                                template=self.template,
                                Action="lambda:InvokeFunction",
                                FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                                Principal='events.amazonaws.com',
                                SourceArn=source_arn,
                            )
                            seen[eventsrule_logical_name] = True

            # IoT Analytics permission(s)
            if schemas.IIoTAnalyticsPipeline.providedBy(obj):
                seen = {}
                for activity in obj.pipeline_activities.values():
                    if activity.activity_type == 'lambda':
                        target_ref = Reference(activity.function)
                        target_ref.set_account_name(account_ctx.get_name())
                        target_ref.set_region(aws_region)
                        lambda_ref = Reference(awslambda.paco_ref)
                        if target_ref.raw == lambda_ref.raw:
                            # yes, the IoT Analytics Lambda Activity has a ref to this Lambda
                            group = get_parent_by_interface(obj, schemas.IResourceGroup)
                            iotap_logical_name = self.gen_cf_logical_name(group.name + obj.name, '_')
                            if iotap_logical_name not in seen:
                                rule_name = create_event_rule_name(obj)
                                troposphere.awslambda.Permission(
                                    title='IoTAnalyticsPipeline' + iotap_logical_name,
                                    template=self.template,
                                    Action="lambda:InvokeFunction",
                                    FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                                    Principal='iotanalytics.amazonaws.com',
                                )
                                seen[iotap_logical_name] = True

        # Log group(s)
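        # The Lambda function name is derived from the function ARN: splitting
        # 'arn:aws:lambda:<region>:<account>:function:<name>' on ':' leaves the
        # name as the 7th element (index 6).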
        loggroup_function_name = troposphere.Join(
            '', [
                '/aws/lambda/',
                troposphere.Select(
                    6, troposphere.Split(':', troposphere.GetAtt(self.awslambda_resource, 'Arn'))
                )
            ]
        )
        loggroup_resources = []
        loggroup_resources.append(
            self.add_log_group(loggroup_function_name, 'lambda')
        )
        if len(awslambda.log_group_names) > 0:
            # Additional App-specific LogGroups
            for loggroup_name in awslambda.log_group_names:
                # Add LogGroup to the template
                prefixed_loggroup_name = prefixed_name(awslambda, loggroup_name, self.paco_ctx.legacy_flag)
                loggroup_resources.append(
                    self.add_log_group(prefixed_loggroup_name)
                )

        # LogGroup permissions
        log_group_arns = [
            troposphere.Join(':', [
                f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group',
                loggroup_function_name,
                '*'
            ])
        ]
        log_stream_arns = [
            troposphere.Join(':', [
                f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group',
                loggroup_function_name,
                'log-stream',
                '*'
            ])
        ]
        for loggroup_name in awslambda.log_group_names:
            prefixed_loggroup_name = prefixed_name(awslambda, loggroup_name, self.paco_ctx.legacy_flag)
            log_group_arns.append(
                f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group:{prefixed_loggroup_name}:*'
            )
            log_stream_arns.append(
                f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group:{prefixed_loggroup_name}:log-stream:*'
            )

        loggroup_policy_resource = troposphere.iam.ManagedPolicy(
            title='LogGroupManagedPolicy',
            PolicyDocument=Policy(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Sid='AllowLambdaModifyLogStreams',
                        Effect=Allow,
                        Action=[
                            Action("logs","CreateLogStream"),
                            Action("logs","DescribeLogStreams"),
                        ],
                        Resource=log_group_arns,
                    ),
                    Statement(
                        Sid='AllowLambdaPutLogEvents',
                        Effect=Allow,
                        Action=[
                            Action("logs","PutLogEvents"),
                        ],
                        Resource=log_stream_arns,
                    ),
                ],
            ),
            Roles=[troposphere.Ref(role_name_param)],
        )
        loggroup_policy_resource.DependsOn = loggroup_resources
        self.template.add_resource(loggroup_policy_resource)

        # Outputs
        self.create_output(
            title='FunctionName',
            value=troposphere.Ref(self.awslambda_resource),
            ref=awslambda.paco_ref_parts + '.name',
        )
        self.create_output(
            title='FunctionArn',
            value=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
            ref=awslambda.paco_ref_parts + '.arn',
        )
Example #24
0
    def __init__(self, paco_ctx, account_ctx, aws_region, stack_group,
                 stack_tags, env_ctx, app_id, grp_id, elb_id, elb_config,
                 elb_config_ref):
        #paco_ctx.log("ELB CF Template init")

        self.env_ctx = env_ctx
        segment_stack = self.env_ctx.get_segment_stack(elb_config['segment'])

        super().__init__(paco_ctx=paco_ctx,
                         account_ctx=account_ctx,
                         aws_region=aws_region,
                         enabled=elb_config.is_enabled(),
                         config_ref=elb_config_ref,
                         stack_group=stack_group,
                         stack_tags=stack_tags)
        self.set_aws_name('ELB', grp_id, elb_id)

        # Initialize Parameters
        self.set_parameter('HealthyThreshold',
                           elb_config['health_check']['healthy_threshold'])
        self.set_parameter('HealthCheckInterval',
                           elb_config['health_check']['interval'])
        self.set_parameter('HealthyTimeout',
                           elb_config['health_check']['timeout'])
        self.set_parameter('HealthCheckTarget',
                           elb_config['health_check']['target'])
        self.set_parameter('UnhealthyThreshold',
                           elb_config['health_check']['unhealthy_threshold'])
        self.set_parameter('ConnectionDrainingEnabled',
                           elb_config['connection_draining']['enabled'])
        self.set_parameter('ConnectionDrainingTimeout',
                           elb_config['connection_draining']['timeout'])
        self.set_parameter('ConnectionSettingsIdleSeconds',
                           elb_config['connection_settings']['idle_timeout'])
        self.set_parameter('CrossZone', elb_config['cross_zone'])
        self.set_parameter('CustomDomainName',
                           elb_config['dns']['domain_name'])
        self.set_parameter('HostedZoneId', elb_config['dns']['hosted_zone'])
        self.set_parameter('DNSEnabled', elb_config.is_dns_enabled())

        elb_region = self.env_ctx.region
        self.set_parameter('ELBHostedZoneId',
                           self.lb_hosted_zone_id('elb', elb_region))
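        # NOTE: the template below declares an ELBHostedZoneId parameter, but the
        # RecordSet alias resolves the zone with
        # !GetAtt ClassicLoadBalancer.CanonicalHostedZoneNameID, so this parameter
        # is not referenced by any resource in the template.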

        # 32 Characters max
        # <proj>-<env>-<app>-<elb_id>
        # TODO: Limit each name item to 7 chars
        # Name collision risk: if unique identifying characters are truncated
        #   - Add a hash?
        #   - Check for duplicates with validating template
        # TODO: Make a method for this
        #load_balancer_name = paco_ctx.project_ctx.name + "-" + paco_ctx.env_ctx.name + "-" + stack_group_ctx.application_name + "-" + elb_id
        load_balancer_name = self.create_resource_name_join(name_list=[
            self.env_ctx.netenv_id, self.env_ctx.env_id, app_id, elb_id
        ],
                                                            separator='',
                                                            camel_case=True)
        self.set_parameter('LoadBalancerEnabled', elb_config.is_enabled())
        self.set_parameter('LoadBalancerName', load_balancer_name)

        self.set_parameter('Scheme', elb_config['scheme'])

        # Segment SubnetList is a Segment stack Output based on availability zones
        subnet_list_key = 'SubnetList' + str(self.env_ctx.availability_zones())
        self.set_parameter(
            StackOutputParam('SubnetList', segment_stack, subnet_list_key,
                             self))

        # Security Group List
        # TODO: Use self.create_cfn_ref_list_param()
        sg_output_param = StackOutputParam('SecurityGroupList',
                                           param_template=self)
        for sg_ref in elb_config['security_groups']:
            # TODO: Better name for self.get_stack_outputs_key_from_ref?
            sg_output_key = self.get_stack_outputs_key_from_ref(
                Reference(sg_ref))
            sg_stack = self.paco_ctx.get_ref(sg_ref, 'stack')
            sg_output_param.add_stack_output(sg_stack, sg_output_key)
        self.set_parameter(sg_output_param)

        # Define the Template
        template_fmt = """
AWSTemplateFormatVersion: '2010-09-09'
Description: 'Elastic Load Balancer'

Parameters:

  LoadBalancerEnabled:
    Description: Boolean indicating whether the load balancer is enabled or not.
    Type: String
    AllowedValues:
      - true
      - false

  HealthyThreshold:
    Description: Specifies the number of consecutive health probe successes required before moving the instance to the Healthy state.
    Type: Number

  HealthCheckInterval:
    Description: Specifies the approximate interval, in seconds, between health checks of an individual instance.
    Type: Number

  HealthyTimeout:
    Description: Specifies the amount of time, in seconds, during which no response means a failed health probe.
    Type: Number

  HealthCheckTarget:
    Description: The ELB's health check target
    Type: String

  UnhealthyThreshold:
    Description: Specifies the number of consecutive health probe failures required before moving the instance to the Unhealthy state.
    Type: Number

  ConnectionDrainingEnabled:
    Description: Boolean indicating whether connection draining is enabled
    Type: String
    AllowedValues:
      - true
      - false

  ConnectionDrainingTimeout:
    Description: The time, in seconds, after which the load balancer closes all connections to a deregistered or unhealthy instance.
    Type: Number

  ConnectionSettingsIdleSeconds:
    Description: The time in seconds that a connection to the load balancer can remain idle before being forcibly closed.
    Type: Number

  CrossZone:
    Description: Whether cross availability zone load balancing is enabled for the load balancer.
    Type: String
    MinLength: '1'
    MaxLength: '128'

  LoadBalancerName:
    Description: The name of the load balancer
    Type: String

  Scheme:
    Description: 'Specify internal to create an internal load balancer with a DNS name that resolves to private IP addresses or internet-facing to create a load balancer with a publicly resolvable DNS name, which resolves to public IP addresses.'
    Type: String
    MinLength: '1'
    MaxLength: '128'

  SubnetList:
    Description: A list of subnets where the ELB's instances will be provisioned
    Type: List<AWS::EC2::Subnet::Id>

  SecurityGroupList:
    Description: A list of security groups to attach to the ELB
    Type: List<AWS::EC2::SecurityGroup::Id>

  CustomDomainName:
    Description: Custom DNS name to assign to the ELB
    Type: String
    Default: ""

  HostedZoneId:
    Description: The Route53 Hosted Zone ID where the Custom Domain will be added
    Type: String

  ELBHostedZoneId:
    Description: The Regional AWS Route53 Hosted Zone ID
    Type: String

  DNSEnabled:
    Description: Enables the creation of DNS Record Sets
    Type: String

{0[SSLCertificateParameters]:s}

Conditions:
  IsEnabled: !Equals [!Ref LoadBalancerEnabled, "true"]
  CustomDomainExists: !Not [!Equals [!Ref CustomDomainName, ""] ]
  DNSIsEnabled: !Equals [!Ref DNSEnabled, "true"]
  CustomDomainIsEnabled: !And
    - !Condition DNSIsEnabled
    - !Condition CustomDomainExists
    - !Condition IsEnabled

Resources:

# Elastic Load Balancer

  ClassicLoadBalancer:
    Type: AWS::ElasticLoadBalancing::LoadBalancer
    Condition: IsEnabled
    Properties:
      LoadBalancerName: !Ref LoadBalancerName
      Subnets: !Ref SubnetList
      HealthCheck:
        HealthyThreshold: !Ref HealthyThreshold
        Interval: !Ref HealthCheckInterval
        Target: !Ref HealthCheckTarget
        Timeout: !Ref HealthyTimeout
        UnhealthyThreshold: !Ref UnhealthyThreshold
      ConnectionDrainingPolicy:
        Enabled: !Ref ConnectionDrainingEnabled
        Timeout: !Ref ConnectionDrainingTimeout
      ConnectionSettings:
        IdleTimeout: !Ref ConnectionSettingsIdleSeconds
      CrossZone: !Ref CrossZone
      Scheme: !Ref Scheme
      SecurityGroups: !Ref SecurityGroupList
      Listeners: {0[Listeners]:s}

  RecordSet:
    Type: AWS::Route53::RecordSet
    Condition: CustomDomainIsEnabled
    Properties:
      HostedZoneId: !Ref HostedZoneId
      Name: !Ref CustomDomainName
      Type: A
      AliasTarget:
        DNSName: !GetAtt ClassicLoadBalancer.DNSName
        HostedZoneId: !GetAtt ClassicLoadBalancer.CanonicalHostedZoneNameID

Outputs:
  LoadBalancer:
    Value: !Ref ClassicLoadBalancer
"""
        ssl_cert_param_fmt = """
  SSLCertificateId{0[idx]:d}:
    Description: The Arn of the SSL Certificate to associate with this Load Balancer
    Type: String

"""

        listener_fmt = """
        - InstancePort: {0[instance_port]:d}
          LoadBalancerPort: {0[elb_port]:d}
          Protocol: {0[elb_protocol]:s}
          InstanceProtocol: {0[instance_protocol]:s}"""

        listener_table = {
            'idx': None,
            'instance_port': None,
            'elb_port': None,
            'elb_protocol': None,
            'instance_protocol': None
        }

        ssl_certificate_fmt = """
          SSLCertificateId: !Ref SSLCertificateId{0[idx]:d}"""

        listener_yaml = ""
        ssl_cert_param_yaml = ""
        listener_idx = 0
        for listener in elb_config['listeners']:
            listener_table['idx'] = listener_idx
            listener_table['instance_port'] = listener['instance_port']
            listener_table['elb_port'] = listener['elb_port']
            listener_table['elb_protocol'] = listener['elb_protocol']
            listener_table['instance_protocol'] = listener['instance_protocol']
            listener_yaml += listener_fmt.format(listener_table)
            if 'ssl_certificate_id' in listener:
                listener_yaml += ssl_certificate_fmt.format(listener_table)
                ssl_cert_param_yaml += ssl_cert_param_fmt.format(
                    listener_table)
                self.set_parameter(
                    'SSLCertificateId' + str(listener_idx),
                    self.paco_ctx.get_ref(listener['ssl_certificate_id']))
            listener_idx += 1
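        # For illustration only, a single HTTPS listener with a certificate would
        # render listener_yaml roughly as (ports and protocols are hypothetical):
        #         - InstancePort: 80
        #           LoadBalancerPort: 443
        #           Protocol: HTTPS
        #           InstanceProtocol: HTTP
        #           SSLCertificateId: !Ref SSLCertificateId0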

        template_fmt_table = {
            'Listeners': listener_yaml,
            'SSLCertificateParameters': ssl_cert_param_yaml
        }

        self.set_template(template_fmt.format(template_fmt_table))
Example #25
0
    def iam_user_access_keys_hook(self, hook, iamuser):
        "Manage the IAM User's Access Keys"
        access = iamuser.programmatic_access
        if access == None: return

        username = self.iamuser_stack.get_outputs_value(
            self.iamuser_stack.get_outputs_key_from_ref(
                Reference(self.resource.paco_ref + '.username')))
        iamuser_client = IAMUserClient(self.account_ctx, self.aws_region,
                                       username)

        # enable or disable existing Access Keys
        if not iamuser.is_enabled() or access.enabled == False:
            iamuser_client.disable_access_keys()
            return
        iamuser_client.enable_access_keys()

        # Get list of access keys and load their versions
        keys_meta = iamuser_client.list_access_keys()
        old_keys = {
            '1': None,
            '2': None,
        }
        statename = md5sum(str_data=username)
        s3key = f"IAMUser/{statename}"
        api_key_state = self.paco_ctx.paco_buckets.get_object(
            s3key, self.account_ctx, self.aws_region)
        if api_key_state == None:
            api_key_state = {}
        else:
            api_key_state = json.loads(api_key_state.decode("utf-8"))
        start_state = copy.copy(api_key_state)
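        # api_key_state maps an AccessKeyId to {'KeyNum': '1' or '2', 'Version': int}.
        # Illustrative shape only (the key id below is hypothetical):
        #   {"AKIAEXAMPLEKEYID": {"KeyNum": "1", "Version": 2}}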

        for key_meta in keys_meta['AccessKeyMetadata']:
            key_info = api_key_state.get(key_meta['AccessKeyId'], None)
            if key_info == None:
                print(
                    f"Creating missing KeyNum AccessKeyMetadata for: {username} + {key_meta['AccessKeyId']}"
                )
                api_key_state[key_meta['AccessKeyId']] = {}
                key_num = str(keys_meta['AccessKeyMetadata'].index(key_meta) +
                              1)
                api_key_state[key_meta['AccessKeyId']]['KeyNum'] = key_num
                api_key_state[key_meta['AccessKeyId']]['Version'] = getattr(
                    access, f'access_key_{key_num}_version')
            key_num = api_key_state[key_meta['AccessKeyId']]['KeyNum']
            key_version = api_key_state[key_meta['AccessKeyId']]['Version']
            old_keys[key_num] = {
                'access_key_id': key_meta['AccessKeyId'],
                'version': int(key_version),
                'key_num': key_num,
            }

        # Loop through user configuration and update keys
        for key_num in ['1', '2']:
            new_key_version = getattr(access, f'access_key_{key_num}_version')
            if old_keys[key_num] == None and new_key_version > 0:
                access_key_id = iamuser_client.create_access_key(key_num)
                api_key_state[access_key_id] = {
                    "KeyNum": key_num,
                    "Version": new_key_version
                }
            elif old_keys[key_num] != None and new_key_version == 0:
                access_key_id = old_keys[key_num]['access_key_id']
                iamuser_client.delete_access_key(key_num, access_key_id)
                del api_key_state[access_key_id]
            elif old_keys[key_num] != None and old_keys[key_num][
                    'version'] != new_key_version:
                old_access_key_id = old_keys[key_num]['access_key_id']
                access_key_id = iamuser_client.rotate_access_key(
                    new_key_version, old_access_key_id)
                del api_key_state[old_access_key_id]
                api_key_state[access_key_id] = {
                    "KeyNum": key_num,
                    "Version": new_key_version
                }

        # save updated api_key_state
        if start_state != api_key_state:
            self.paco_ctx.paco_buckets.put_object(s3key,
                                                  json.dumps(api_key_state),
                                                  self.account_ctx,
                                                  self.aws_region)
Example #26
0
    def paco_sub(self):
        "Perform paco.sub expressions with the substitution string"
        while True:
            # Isolate string between quotes: paco.sub ''
            sub_idx = self.body.find('paco.sub')
            if sub_idx == -1:
                break
            end_idx = self.body.find('\n', sub_idx)
            if end_idx == -1:
                end_idx = len(self.body)
            str_idx = self.body.find("'", sub_idx, end_idx)
            if str_idx == -1:
                raise StackException(PacoErrorCode.Unknown,
                                     message="paco.sub error")
            str_idx += 1
            end_str_idx = self.body.find("'", str_idx, end_idx)
            if end_str_idx == -1:
                raise StackException(PacoErrorCode.Unknown,
                                     message="paco.sub error")
            # Isolate any ${} replacements
            first_pass = True
            while True:
                dollar_idx = self.body.find("${", str_idx, end_str_idx)
                if dollar_idx == -1:
                    if first_pass == True:
                        message = 'Unable to find paco.ref in paco.sub expression.\n'
                        message += 'Stack: {}\n'.format(self.stack.get_name())
                        message += "paco.sub '{}'\n".format(
                            self.body[str_idx:end_str_idx])
                        raise StackException(PacoErrorCode.Unknown,
                                             message=message)
                    else:
                        break
                rep_1_idx = dollar_idx
                rep_2_idx = self.body.find("}", rep_1_idx, end_str_idx) + 1
                next_ref_idx = self.body.find("paco.ref ", rep_1_idx,
                                              rep_2_idx)
                if next_ref_idx != -1:
                    sub_ref_idx = next_ref_idx
                    sub_ref = self.body[sub_ref_idx:sub_ref_idx +
                                        (rep_2_idx - sub_ref_idx - 1)]
                    if sub_ref.find('<account>') != -1:
                        sub_ref = sub_ref.replace('<account>',
                                                  self.account_ctx.get_name())
                    if sub_ref.find('<environment>') != -1:
                        sub_ref = sub_ref.replace('<environment>',
                                                  self.environment_name)
                    if sub_ref.find('<region>') != -1:
                        sub_ref = sub_ref.replace('<region>', self.aws_region)

                    sub_value = self.paco_ctx.get_ref(sub_ref)
                    if sub_value == None:
                        raise StackException(
                            PacoErrorCode.Unknown,
                            message=
                            "cftemplate: paco_sub: Unable to locate value for ref: "
                            + sub_ref)
                    # Replace the ${}
                    sub_var = self.body[rep_1_idx:rep_1_idx +
                                        (rep_2_idx - rep_1_idx)]
                    # if a Stack is returned, then look-up the referenced Stack Output and use that
                    if paco.stack.interfaces.IStack.providedBy(sub_value):
                        sub_value = sub_value.get_outputs_value(
                            sub_value.get_outputs_key_from_ref(
                                Reference(sub_ref)))
                    self.body = self.body.replace(sub_var, sub_value, 1)
                else:
                    #print("break 3")
                    break
                first_pass = False

            # Remove the paco.sub '' scaffolding
            self.body = self.body[:sub_idx] + self.body[str_idx:]
            end_idx = self.body.find('\n', sub_idx)
            end_str_idx = self.body.find("'", sub_idx, end_idx)
            self.body = self.body[:end_str_idx] + self.body[end_str_idx + 1:]
Example #27
0
    def __init__(self, paco_ctx, account_ctx, aws_region, stack_group,
                 stack_tags, env_id, app_id, grp_id, ec2_id, ec2_config,
                 ec2_config_ref):
        #paco_ctx.log("EC2 CF Template init")

        super().__init__(paco_ctx,
                         account_ctx,
                         aws_region,
                         enabled=ec2_config.is_enabled(),
                         config_ref=ec2_config_ref,
                         stack_group=stack_group,
                         stack_tags=stack_tags)
        self.set_aws_name('EC2', grp_id, ec2_id)

        # Initialize Parameters
        instance_name = self.create_resource_name_join(
            [self.env_ctx.netenv_id, env_id, app_id, ec2_id], '-', True)
        self.set_parameter('InstanceName', instance_name)
        self.set_parameter('AssociatePublicIpAddress',
                           ec2_config.associate_public_ip_address)
        self.set_parameter('InstanceAMI', ec2_config.instance_ami)
        self.set_parameter('KeyName', ec2_config.instance_key_pair)

        #self.set_parameter('SubnetId', ec2_config['?'])

        # Segment SubnetList is a Segment stack Output based on availability zones
        segment_stack = self.env_ctx.get_segment_stack(ec2_config.segment)
        subnet_list_output_key = 'SubnetList1'
        self.set_parameter(
            StackOutputParam('SubnetId', segment_stack, subnet_list_output_key,
                             self))

        # Security Group List
        # TODO: Use self.create_cfn_ref_list_param()
        sg_output_param = StackOutputParam('SecurityGroupIds',
                                           param_template=self)
        for sg_ref in ec2_config.security_groups:
            # TODO: Better name for self.get_stack_outputs_key_from_ref?
            security_group_stack = self.paco_ctx.get_ref(sg_ref)
            sg_output_key = self.get_stack_outputs_key_from_ref(
                Reference(sg_ref))
            sg_output_param.add_stack_output(security_group_stack,
                                             sg_output_key)
        self.set_parameter(sg_output_param)

        self.set_parameter('InstanceType', ec2_config.instance_type)
        self.set_parameter('InstanceIAMProfileName',
                           ec2_config.instance_iam_profile)
        self.set_parameter('RootVolumeSizeGB', ec2_config.root_volume_size_gb)

        self.set_parameter('DisableApiTermination',
                           ec2_config.disable_api_termination)
        self.set_parameter('PrivateIpAddress', ec2_config.private_ip_address)

        self.set_parameter('UserData', ec2_config.user_data)
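        # NOTE: the UserData property in the template below is commented out, so this
        # parameter is defined but not currently applied to the instance.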

        # Define the Template
        template_fmt = """
---
AWSTemplateFormatVersion: "2010-09-09"

Description: EC2 Instance

Parameters:

  InstanceAMI:
    Description: AMI to launch EC2 with.
    Type: String

  KeyName:
    Description: EC2 key pair name.
    Type: AWS::EC2::KeyPair::KeyName

  SubnetId:
    Description: The ID of the subnet where the instance will be launched.
    Type: AWS::EC2::Subnet::Id

  SecurityGroupIds:
    Description: List of Security Group IDs to attach to the instance.
    Type: List<AWS::EC2::SecurityGroup::Id>

  InstanceType:
    Description: EC2 instance type
    Type: String

  InstanceIAMProfileName:
    Description: The name of the IAM Profile to attach to the instance.
    Type: String

  RootVolumeSizeGB:
    Description: The size, in GB, of the root EBS volume to attach to the instance.
    Type: String

  DisableApiTermination:
    Description: Boolean indicating whether the instance can be terminated programmatically.
    Type: String

  PrivateIpAddress:
    Description: Private IP address to assign to the instance.
    Type: String

  UserData:
    Description: User data script to run at instance launch.
    Type: String

  InstanceName:
    Description: The name of the Instance
    Type: String

  AssociatePublicIpAddress:
    Description: Boolean; if true, a public IP address is assigned to the instance
    Type: String

Conditions:
  PrivateIpIsEnabled: !Not [!Equals [!Ref PrivateIpAddress, '']]
  ProfileIsEnabled: !Not [!Equals [!Ref InstanceIAMProfileName, '']]

Resources:

  Instance:
    Type: "AWS::EC2::Instance"
    Properties:
      BlockDeviceMappings:
          - DeviceName: /dev/xvda
            Ebs:
              VolumeSize: !Ref RootVolumeSizeGB
              VolumeType: "gp2"
      NetworkInterfaces:
        - AssociatePublicIpAddress: !Ref AssociatePublicIpAddress
          DeviceIndex: "0"
          GroupSet: !Ref SecurityGroupIds
          SubnetId: !Ref SubnetId
      DisableApiTermination: !Ref DisableApiTermination
      ImageId: !Ref InstanceAMI
      InstanceInitiatedShutdownBehavior: 'stop'
      IamInstanceProfile: !If [ProfileIsEnabled, !Ref InstanceIAMProfileName, !Ref 'AWS::NoValue']
      InstanceType: !Ref InstanceType
      KeyName: !Ref KeyName
      PrivateIpAddress:
        !If [PrivateIpIsEnabled, !Ref PrivateIpAddress, !Ref "AWS::NoValue"]
      Tags:
        - Key: Name
          Value: !Ref InstanceName
#      UserData:
#        Fn::Base64: !Ref UserData

#################### Outputs ###################################
Outputs:
  InstanceId:
    Value: !Ref Instance
"""
        self.register_stack_output_config(ec2_config_ref + '.id', 'InstanceId')

        self.set_template(template_fmt)
Example #28
0
    def init(self):
        # Network Stack Templates
        # VPC Stack
        vpc_config = self.env_ctx.env_region.network.vpc
        if vpc_config == None:
            # NetworkEnvironment with no network - serverless
            return
        network_config = get_parent_by_interface(vpc_config, schemas.INetwork)
        vpc_config.resolve_ref_obj = self
        vpc_config.private_hosted_zone.resolve_ref_obj = self
        self.vpc_stack = self.add_new_stack(
            self.region,
            vpc_config,
            paco.cftemplates.VPC,
            stack_tags=StackTags(self.stack_tags),
        )

        # Segments
        self.segment_list = []
        self.segment_dict = {}
        segments = network_config.vpc.segments
        for segment in segments.values():
            segment.resolve_ref_obj = self
            segment_stack = self.add_new_stack(
                self.region,
                segment,
                paco.cftemplates.Segment,
                stack_tags=StackTags(self.stack_tags),
                stack_orders=[StackOrder.PROVISION],
                extra_context={'env_ctx': self.env_ctx},
            )
            self.segment_dict[segment.name] = segment_stack
            self.segment_list.append(segment_stack)

        # Security Groups
        sg_config = network_config.vpc.security_groups
        self.sg_list = []
        self.sg_dict = {}
        # EC2 NATGateway Security Groups
        # Creates a security group for each Availability Zone in the segment
        sg_nat_id = 'bastion_nat_' + utils.md5sum(str_data='gateway')[:8]
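        # md5sum of the fixed string 'gateway' is deterministic, so sg_nat_id is a
        # stable name: 'bastion_nat_' followed by the first 8 characters of the digest.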
        for nat_config in vpc_config.nat_gateway.values():
            if nat_config.is_enabled() == False:
                continue
            if nat_config.type == 'EC2':
                sg_nat_config_dict = {}
                if sg_nat_id not in sg_config.keys():
                    sg_config[sg_nat_id] = paco.models.networks.SecurityGroups(
                        sg_nat_id, sg_config)
                for az_idx in range(1, network_config.availability_zones + 1):
                    sg_nat_config_dict['enabled'] = True
                    sg_nat_config_dict['ingress'] = []
                    for route_segment in nat_config.default_route_segments:
                        route_segment_id = route_segment.split('.')[-1]
                        az_cidr = getattr(
                            vpc_config.segments[route_segment_id],
                            f"az{az_idx}_cidr")
                        sg_nat_config_dict['ingress'].append({
                            'name': 'SubnetAZ',
                            'cidr_ip': az_cidr,
                            'protocol': '-1'
                        })
                    sg_nat_config_dict['egress'] = [{
                        'name': 'ANY',
                        'cidr_ip': '0.0.0.0/0',
                        'protocol': '-1'
                    }]
                    sg_nat_rule_id = nat_config.name + '_az' + str(az_idx)
                    sg_config[sg_nat_id][
                        sg_nat_rule_id] = paco.models.networks.SecurityGroup(
                            sg_nat_rule_id, vpc_config)
                    paco.models.loader.apply_attributes_from_config(
                        sg_config[sg_nat_id][sg_nat_rule_id],
                        sg_nat_config_dict)

        # Declared Security Groups
        for sg_id in sg_config:
            # Set resolve_ref_obj
            for sg_obj_id in sg_config[sg_id]:
                sg_config[sg_id][sg_obj_id].resolve_ref_obj = self
            sg_stack = self.add_new_stack(
                self.region,
                sg_config[sg_id],
                paco.cftemplates.SecurityGroups,
                stack_tags=StackTags(self.stack_tags),
                extra_context={
                    'env_ctx': self.env_ctx,
                    'template_type': 'Groups'
                },
            )
            self.sg_list.append(sg_stack)
            self.sg_dict[sg_id] = sg_stack

        # Ingress/Egress Stacks
        for sg_id in sg_config:
            self.add_new_stack(self.region,
                               sg_config[sg_id],
                               paco.cftemplates.SecurityGroups,
                               stack_tags=StackTags(self.stack_tags),
                               extra_context={
                                   'env_ctx': self.env_ctx,
                                   'template_type': 'Rules'
                               })

        # Wait for Segment Stacks
        for segment_stack in self.segment_list:
            self.add_stack_order(segment_stack, [StackOrder.WAIT])

        # VPC Peering Stack
        if vpc_config.peering != None:
            peering_config = self.env_ctx.env_region.network.vpc.peering
            for peer_id in peering_config.keys():
                peer_config = vpc_config.peering[peer_id]
                peer_config.resolve_ref_obj = self
                # Add role to the target network account
                if peer_config.network_environment != None and peer_config.peer_type == 'accepter':
                    netenv_ref = Reference(peer_config.network_environment +
                                           '.network')
                    requester_netenv_config = netenv_ref.resolve(
                        self.paco_ctx.project)
                    requester_account_id = self.paco_ctx.get_ref(
                        requester_netenv_config.aws_account + '.id')
                    accepter_vpc_id = self.paco_ctx.get_ref(
                        vpc_config.paco_ref + '.id')
                    # Only create the role if we are cross account
                    if self.account_ctx.id != requester_account_id:
                        self.gen_vpc_peering_accepter_role(
                            peer_config, vpc_config, accepter_vpc_id,
                            requester_account_id)
            self.peering_stack = self.add_new_stack(
                self.region,
                vpc_config.peering,
                paco.cftemplates.VPCPeering,
                stack_tags=StackTags(self.stack_tags),
            )

        # NAT Gateway
        self.nat_list = []
        for nat_config in vpc_config.nat_gateway.values():
            if sg_nat_id in sg_config.keys():
                nat_sg_config = sg_config[sg_nat_id]
            else:
                nat_sg_config = None
            # The NAT Gateway is disabled within the template itself (rather than the stack being skipped) so that it can be deleted and later recreated.
            nat_stack = self.add_new_stack(
                self.region,
                nat_config,
                paco.cftemplates.NATGateway,
                stack_tags=StackTags(self.stack_tags),
                stack_orders=[StackOrder.PROVISION],
                extra_context={'nat_sg_config': nat_sg_config},
            )
            self.nat_list.append(nat_stack)

        for nat_stack in self.nat_list:
            self.add_stack_order(nat_stack, [StackOrder.WAIT])

        # VPC Endpoints
        vpc_endpoints_stack = self.add_new_stack(
            self.region,
            vpc_config,
            paco.cftemplates.VPCEndpoints,
            stack_tags=StackTags(self.stack_tags),
            stack_orders=[StackOrder.PROVISION])
        self.add_stack_order(vpc_endpoints_stack, [StackOrder.WAIT])
Example #29
0
    def create_codebuild_cfn(self, template, pipeline_config, action_config,
                             config_ref):
        # CodeBuild
        compute_type_param = self.create_cfn_parameter(
            param_type='String',
            name='CodeBuildComputeType',
            description=
            'The type of compute environment. This determines the number of CPU cores and memory the build environment uses.',
            value=action_config.codebuild_compute_type,
        )
        image_param = self.create_cfn_parameter(
            param_type='String',
            name='CodeBuildImage',
            description=
            'The image tag or image digest that identifies the Docker image to use for this build project.',
            value=action_config.codebuild_image,
        )
        deploy_env_name_param = self.create_cfn_parameter(
            param_type='String',
            name='DeploymentEnvironmentName',
            description=
            'The name of the environment codebuild will be deploying into.',
            value=action_config.deployment_environment,
        )
        # If ECS Release Phase, then create the needed parameters
        release_phase = action_config.release_phase
        ecs_release_phase_cluster_arn_param = []
        ecs_release_phase_cluster_name_param = []
        ecs_release_phase_service_arn_param = []
        if release_phase != None and release_phase.ecs != None:
            idx = 0
            for command in release_phase.ecs:
                service_obj = get_model_obj_from_ref(command.service,
                                                     self.paco_ctx.project)
                service_obj = get_parent_by_interface(service_obj,
                                                      schemas.IECSServices)
                cluster_arn_param = self.create_cfn_parameter(
                    param_type='String',
                    name=f'ReleasePhaseECSClusterArn{idx}',
                    description='ECS Cluster Arn',
                    value=service_obj.cluster + '.arn',
                )
                ecs_release_phase_cluster_arn_param.append(cluster_arn_param)
                cluster_arn_param = self.create_cfn_parameter(
                    param_type='String',
                    name=f'ReleasePhaseECSClusterName{idx}',
                    description='ECS Cluster Name',
                    value=service_obj.cluster + '.name',
                )
                ecs_release_phase_cluster_name_param.append(cluster_arn_param)
                service_arn_param = self.create_cfn_parameter(
                    param_type='String',
                    name=f'ReleasePhaseECSServiceArn{idx}',
                    description='ECS Service Arn',
                    value=command.service + '.arn',
                )
                ecs_release_phase_service_arn_param.append(service_arn_param)
                idx += 1
        self.project_role_name = self.create_iam_resource_name(
            name_list=[self.res_name_prefix, 'CodeBuild-Project'],
            filter_id='IAM.Role.RoleName')

        # codecommit_repo_users ManagedPolicies
        managed_policy_arns = []
        for user_ref in action_config.codecommit_repo_users:
            user = get_model_obj_from_ref(user_ref, self.paco_ctx.project)
            # codecommit_stack = user.__parent__.__parent__.__parent__.stack
            user_logical_id = self.gen_cf_logical_name(user.username)
            codecommit_user_policy_param = self.create_cfn_parameter(
                param_type='String',
                name='CodeCommitUserPolicy' + user_logical_id,
                description='The CodeCommit User Policy for ' + user.username,
                value=user_ref + '.policy.arn',
            )
            managed_policy_arns.append(
                troposphere.Ref(codecommit_user_policy_param))

        project_role_res = troposphere.iam.Role(
            title='CodeBuildProjectRole',
            template=template,
            RoleName=self.project_role_name,
            ManagedPolicyArns=managed_policy_arns,
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[AssumeRole],
                        Principal=Principal("Service",
                                            ['codebuild.amazonaws.com']),
                    )
                ]))

        project_policy_name = self.create_iam_resource_name(
            name_list=[self.res_name_prefix, 'CodeBuild-Project'],
            filter_id='IAM.Policy.PolicyName')

        # Project Policy
        policy_statements = []
        if self.enable_artifacts_bucket:
            policy_statements.append(
                Statement(
                    Sid='S3Access',
                    Effect=Allow,
                    Action=[
                        Action('s3', 'PutObject'),
                        Action('s3', 'PutObjectAcl'),
                        Action('s3', 'GetObject'),
                        Action('s3', 'GetObjectAcl'),
                        Action('s3', 'ListBucket'),
                        Action('s3', 'DeleteObject'),
                        Action('s3', 'GetBucketPolicy'),
                        Action('s3', 'HeadObject'),
                    ],
                    Resource=[
                        troposphere.Sub('arn:aws:s3:::${ArtifactsBucketName}'),
                        troposphere.Sub(
                            'arn:aws:s3:::${ArtifactsBucketName}/*'),
                    ]))
        if pipeline_config.configuration.disable_codepipeline == False:
            policy_statements.append(
                Statement(Sid='KMSCMK',
                          Effect=Allow,
                          Action=[Action('kms', '*')],
                          Resource=[troposphere.Ref(self.cmk_arn_param)]))
        policy_statements.append(
            Statement(Sid='CloudWatchLogsAccess',
                      Effect=Allow,
                      Action=[
                          Action('logs', 'CreateLogGroup'),
                          Action('logs', 'CreateLogStream'),
                          Action('logs', 'PutLogEvents'),
                      ],
                      Resource=['arn:aws:logs:*:*:*']))

        release_phase = action_config.release_phase
        if release_phase != None and release_phase.ecs != None:
            ssm_doc = self.paco_ctx.project['resource']['ssm'].ssm_documents[
                'paco_ecs_docker_exec']
            # SSM Exec Document
            policy_statements.append(
                Statement(Sid='ECSReleasePhaseSSMCore',
                          Effect=Allow,
                          Action=[
                              Action('ssm', 'ListDocuments'),
                              Action('ssm', 'ListDocumentVersions'),
                              Action('ssm', 'DescribeDocument'),
                              Action('ssm', 'GetDocument'),
                              Action('ssm', 'DescribeInstanceInformation'),
                              Action('ssm', 'DescribeDocumentParameters'),
                              Action('ssm', 'CancelCommand'),
                              Action('ssm', 'ListCommands'),
                              Action('ssm', 'ListCommandInvocations'),
                              Action('ssm', 'DescribeAutomationExecutions'),
                              Action('ssm', 'DescribeInstanceProperties'),
                              Action('ssm', 'GetCommandInvocation'),
                              Action('ec2', 'DescribeInstanceStatus'),
                          ],
                          Resource=['*']))
            policy_statements.append(
                Statement(
                    Sid='ECSReleasePhaseSSMSendCommandDocument',
                    Effect=Allow,
                    Action=[
                        Action('ssm', 'SendCommand'),
                    ],
                    Resource=[
                        f'arn:aws:ssm:{self.aws_region}:{self.account_ctx.get_id()}:document/paco_ecs_docker_exec'
                    ]))
            idx = 0
            for command in release_phase.ecs:
                policy_statements.append(
                    Statement(
                        Sid=f'ECSReleasePhaseSSMSendCommand{idx}',
                        Effect=Allow,
                        Action=[
                            Action('ssm', 'SendCommand'),
                        ],
                        Resource=['arn:aws:ec2:*:*:instance/*'],
                        Condition=Condition(
                            StringLike({
                                'ssm:resourceTag/Paco-ECSCluster-Name':
                                troposphere.Ref(
                                    ecs_release_phase_cluster_name_param[idx])
                            }))))

                policy_statements.append(
                    Statement(
                        Sid=f'ECSReleasePhaseClusterAccess{idx}',
                        Effect=Allow,
                        Action=[
                            Action('ecs', 'DescribeServices'),
                            Action('ecs', 'RunTask'),
                            Action('ecs', 'StopTask'),
                            Action('ecs', 'DescribeContainerInstances'),
                            Action('ecs', 'ListTasks'),
                            Action('ecs', 'DescribeTasks'),
                        ],
                        Resource=['*'],
                        Condition=Condition(
                            StringEquals({
                                'ecs:cluster':
                                troposphere.Ref(
                                    ecs_release_phase_cluster_arn_param[idx])
                            }))))
                idx += 1

            policy_statements.append(
                Statement(Sid='ECSReleasePhaseSSMAutomationExecution',
                          Effect=Allow,
                          Action=[
                              Action('ssm', 'StartAutomationExecution'),
                              Action('ssm', 'StopAutomationExecution'),
                              Action('ssm', 'GetAutomationExecution'),
                          ],
                          Resource=['arn:aws:ssm:::automation-definition/']))
            # ECS Policies
            policy_statements.append(
                Statement(Sid='ECSReleasePhaseECS',
                          Effect=Allow,
                          Action=[
                              Action('ecs', 'DescribeTaskDefinition'),
                              Action('ecs', 'DeregisterTaskDefinition'),
                              Action('ecs', 'RegisterTaskDefinition'),
                              Action('ecs', 'ListTagsForResource'),
                              Action('ecr', 'DescribeImages')
                          ],
                          Resource=['*']))

            # IAM Pass Role
            policy_statements.append(
                Statement(Sid='IAMPassRole',
                          Effect=Allow,
                          Action=[Action('iam', 'passrole')],
                          Resource=['*']))

        if len(action_config.secrets) > 0:
            secrets_arn_list = []
            for secret_ref in action_config.secrets:
                name_hash = md5sum(str_data=secret_ref)
                secret_arn_param = self.create_cfn_parameter(
                    param_type='String',
                    name='SecretsArn' + name_hash,
                    description=
                    'Secrets Manager Secret Arn to expose access to',
                    value=secret_ref + '.arn')
                secrets_arn_list.append(troposphere.Ref(secret_arn_param))
            policy_statements.append(
                Statement(Sid='SecretsManager',
                          Effect=Allow,
                          Action=[
                              Action('secretsmanager', 'GetSecretValue'),
                          ],
                          Resource=secrets_arn_list))

        project_policy_res = troposphere.iam.PolicyType(
            title='CodeBuildProjectPolicy',
            PolicyName=project_policy_name,
            PolicyDocument=PolicyDocument(Statement=policy_statements),
            Roles=[troposphere.Ref(project_role_res)])
        project_policy_res.DependsOn = project_role_res
        template.add_resource(project_policy_res)
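        # Statements appended to policy_statements after this point (e.g. the VPC
        # config permissions below) are still included in this policy, since the
        # PolicyDocument references the same list and the template is serialized later.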

        # User defined policies
        for policy in action_config.role_policies:
            policy_name = self.create_resource_name_join(
                name_list=[
                    self.res_name_prefix, 'CodeBuild-Project', policy.name
                ],
                separator='-',
                filter_id='IAM.Policy.PolicyName',
                hash_long_names=True,
                camel_case=True)
            statement_list = []

            for statement in policy.statement:
                action_list = []
                for action in statement.action:
                    action_parts = action.split(':')
                    action_list.append(Action(action_parts[0],
                                              action_parts[1]))
                statement_list.append(
                    Statement(Effect=statement.effect,
                              Action=action_list,
                              Resource=statement.resource))
            troposphere.iam.PolicyType(
                title=self.create_cfn_logical_id('CodeBuildProjectPolicy' +
                                                 policy.name,
                                                 camel_case=True),
                template=template,
                PolicyName=policy_name,
                PolicyDocument=PolicyDocument(Statement=statement_list, ),
                Roles=[troposphere.Ref(project_role_res)])

        # ECR Permission Policies
        self.set_ecr_repositories_statements(
            action_config.ecr_repositories, template,
            f'{self.res_name_prefix}-CodeBuild-Project',
            [troposphere.Ref(project_role_res)])

        # CodeBuild Project Resource
        timeout_mins_param = self.create_cfn_parameter(
            param_type='String',
            name='TimeoutInMinutes',
            description=
            'How long, in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait before timing out any related build that did not get marked as completed.',
            value=action_config.timeout_mins,
        )

        # Environment Variables
        codebuild_env_vars = [{
            'Name': 'DeploymentEnvironmentName',
            'Value': troposphere.Ref(deploy_env_name_param)
        }]
        if pipeline_config.configuration.disable_codepipeline == False:
            codebuild_env_vars.append({
                'Name':
                'KMSKey',
                'Value':
                troposphere.Ref(self.cmk_arn_param)
            })
        if self.enable_artifacts_bucket:
            codebuild_env_vars.append({
                'Name':
                'ArtifactsBucket',
                'Value':
                troposphere.Ref(self.artifacts_bucket_name_param),
            })
        # If ECS Release Phase, then add the config to the environment
        release_phase = action_config.release_phase
        if release_phase != None and release_phase.ecs != None:
            idx = 0
            for command in release_phase.ecs:
                codebuild_env_vars.append({
                    'Name':
                    f'PACO_CB_RP_ECS_CLUSTER_ID_{idx}',
                    'Value':
                    troposphere.Ref(ecs_release_phase_cluster_arn_param[idx])
                })
                codebuild_env_vars.append({
                    'Name':
                    f'PACO_CB_RP_ECS_SERVICE_ID_{idx}',
                    'Value':
                    troposphere.Ref(ecs_release_phase_service_arn_param[idx])
                })
                idx += 1

        # CodeBuild: Environment
        project_dict = {
            'Name':
            troposphere.Ref(self.resource_name_prefix_param),
            'Artifacts': {
                'Type': 'NO_ARTIFACTS'
            },
            'Description':
            troposphere.Ref('AWS::StackName'),
            'ServiceRole':
            troposphere.GetAtt('CodeBuildProjectRole', 'Arn'),
            'Environment': {
                'Type': 'LINUX_CONTAINER',
                'ComputeType': troposphere.Ref(compute_type_param),
                'Image': troposphere.Ref(image_param),
                'EnvironmentVariables': codebuild_env_vars,
                'PrivilegedMode': action_config.privileged_mode
            },
            'Source': {
                'Type': 'NO_SOURCE'
            },
            'TimeoutInMinutes':
            troposphere.Ref(timeout_mins_param),
            'Tags':
            troposphere.codebuild.Tags(
                Name=troposphere.Ref(self.resource_name_prefix_param))
        }

        if action_config.buildspec:
            project_dict['Source']['BuildSpec'] = action_config.buildspec

        if pipeline_config.configuration.disable_codepipeline == False:
            project_dict['EncryptionKey'] = troposphere.Ref(self.cmk_arn_param)
            project_dict['Artifacts'] = {'Type': 'CODEPIPELINE'}
            project_dict['Source']['Type'] = 'CODEPIPELINE'
        else:
            if action_config.artifacts == None or action_config.artifacts.type == 'NO_ARTIFACTS':
                project_dict['Artifacts'] = {
                    'Type': 'NO_ARTIFACTS',
                }
            else:
                project_dict['Artifacts'] = {
                    'Type': action_config.artifacts.type,
                    'Location':
                    troposphere.Ref(self.artifacts_bucket_name_param),
                    'NamespaceType': action_config.artifacts.namespace_type,
                    'Packaging': action_config.artifacts.packaging,
                    'Name': action_config.artifacts.name
                }
                if action_config.artifacts.path != None:
                    project_dict['Artifacts'][
                        'Path'] = action_config.artifacts.path
            if action_config.source.github != None:
                github_config = action_config.source.github
                project_dict['Source']['Type'] = 'GITHUB'
                location = f'https://github.com/{github_config.github_owner}/{github_config.github_repository}.git'
                project_dict['Source']['Location'] = location
                project_dict['Source'][
                    'ReportBuildStatus'] = github_config.report_build_status
                if github_config.deployment_branch_name != None:
                    project_dict[
                        'SourceVersion'] = github_config.deployment_branch_name
            else:
                raise PacoException(
                    "CodeBuild source must be configured when Codepipeline is disabled."
                )

        if action_config.concurrent_build_limit > 0:
            project_dict[
                'ConcurrentBuildLimit'] = action_config.concurrent_build_limit

        if action_config.vpc_config != None:
            vpc_config = action_config.vpc_config
            vpc_id_param = self.create_cfn_parameter(
                name='VPC',
                param_type='AWS::EC2::VPC::Id',
                description='The VPC Id',
                value='paco.ref netenv.{}.<environment>.<region>.network.vpc.id'
                .format(self.env_ctx.netenv.name),
            )

            security_group_list = []
            for sg_ref in vpc_config.security_groups:
                ref = Reference(sg_ref)
                sg_param_name = self.gen_cf_logical_name('SecurityGroupId' +
                                                         ref.parts[-2] +
                                                         ref.parts[-1])
                sg_param = self.create_cfn_parameter(
                    name=sg_param_name,
                    param_type='String',
                    description='Security Group Id',
                    value=sg_ref + '.id',
                )
                security_group_list.append(troposphere.Ref(sg_param))

            # security_group_list_param = self.create_cfn_ref_list_param(
            #     param_type='List<AWS::EC2::SecurityGroup::Id>',
            #     name='SecurityGroupList',
            #     description='List of security group ids to attach to CodeBuild.',
            #     value=vpc_config.security_groups,
            #     ref_attribute='id',
            # )
            subnet_id_list = []
            subnet_arn_list = []
            az_size = self.env_ctx.netenv[self.account_ctx.name][
                self.aws_region].network.availability_zones
            for segment_ref in vpc_config.segments:
                for az_idx in range(1, az_size + 1):
                    # Subnet Ids
                    segment_name = self.create_cfn_logical_id(
                        f"Segment{segment_ref.split('.')[-1]}AZ{az_idx}")
                    subnet_id_param = self.create_cfn_parameter(
                        name=segment_name,
                        param_type='AWS::EC2::Subnet::Id',
                        description=
                        f'VPC Subnet Id in AZ{az_idx} for CodeBuild VPC Config',
                        value=segment_ref + f'.az{az_idx}.subnet_id')
                    subnet_id_list.append(troposphere.Ref(subnet_id_param))
                    # Subnet Arns
                    subnet_arn_param = self.create_cfn_parameter(
                        name=segment_name + 'Arn',
                        param_type='String',
                        description=
                        f'VPC Subnet Id ARN in AZ{az_idx} for CodeBuild VPC Config',
                        value=segment_ref + f'.az{az_idx}.subnet_id.arn')
                    subnet_arn_list.append(troposphere.Ref(subnet_arn_param))

            if len(subnet_id_list) == 0:
                raise PacoException(
                    "CodeBuild VPC Config must have at least one segment defined."
                )

            # VPC Config Permissions
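            # CodeBuild creates and manages ENIs in the VPC on the project's behalf,
            # so the project role needs the EC2 network interface and describe actions below.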
            policy_statements.append(
                Statement(Sid='VpcConfigPermissions',
                          Effect=Allow,
                          Action=[
                              Action('ec2', 'CreateNetworkInterface'),
                              Action('ec2', 'DescribeDhcpOptions'),
                              Action('ec2', 'DescribeNetworkInterfaces'),
                              Action('ec2', 'DeleteNetworkInterface'),
                              Action('ec2', 'DescribeSubnets'),
                              Action('ec2', 'DescribeSecurityGroups'),
                              Action('ec2', 'DescribeVpcs'),
                          ],
                          Resource=['*']))
            policy_statements.append(
                Statement(
                    Sid='VpcConfigNetworkInterface',
                    Effect=Allow,
                    Action=[
                        Action('ec2', 'CreateNetworkInterfacePermission'),
                    ],
                    Resource=[
                        f'arn:aws:ec2:{self.aws_region}:{self.account_ctx.id}:network-interface/*'
                    ],
                    Condition=Condition([
                        StringEquals({
                            "ec2:AuthorizedService":
                            "codebuild.amazonaws.com"
                        }),
                        ArnEquals({"ec2:Subnet": subnet_arn_list})
                    ])))

            project_dict['VpcConfig'] = {
                'VpcId': troposphere.Ref(vpc_id_param),
                'SecurityGroupIds': security_group_list,
                'Subnets': subnet_id_list
            }
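            # Illustrative shape of the resulting template fragment (names are examples):
            #   VpcConfig:
            #     VpcId: !Ref VPC
            #     SecurityGroupIds: [!Ref SecurityGroupId<...>, ...]
            #     Subnets: [!Ref Segment<name>AZ1, !Ref Segment<name>AZ2, ...]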

        # Batch Build Config
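        # Batch builds use a separate service role that CodeBuild assumes to start,
        # stop and retry the builds in the batch (its policy is attached further below).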
        batch_service_role_res = None
        batch_config = action_config.build_batch_config
        if batch_config != None and batch_config.is_enabled():

            batch_service_role_name = self.create_iam_resource_name(
                name_list=[
                    self.res_name_prefix, 'CodeBuild-BuildBatch-ServiceRole'
                ],
                filter_id='IAM.Role.RoleName')
            batch_service_role_res = troposphere.iam.Role(
                title='CodeBuildBuildBatchConfigServiceRole',
                template=template,
                RoleName=batch_service_role_name,
                AssumeRolePolicyDocument=PolicyDocument(
                    Version="2012-10-17",
                    Statement=[
                        Statement(
                            Effect=Allow,
                            Action=[AssumeRole],
                            Principal=Principal("Service",
                                                ['codebuild.amazonaws.com']),
                        )
                    ]))

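            # Map the batch settings from the model onto the Project's BuildBatchConfig property.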
            project_dict['BuildBatchConfig'] = {
                'BatchReportMode': batch_config.batch_report_mode,
                'CombineArtifacts': batch_config.combine_artifacts,
                'TimeoutInMins': batch_config.timeout_in_mins,
                'ServiceRole': troposphere.GetAtt(batch_service_role_res,
                                                  'Arn'),
                'Restrictions': {
                    'ComputeTypesAllowed':
                    batch_config.restrictions.compute_types_allowed,
                    'MaximumBuildsAllowed':
                    batch_config.restrictions.maximum_builds_allowed
                }
            }

        project_res = troposphere.codebuild.Project.from_dict(
            'CodeBuildProject', project_dict)
        project_res.DependsOn = [project_policy_res]
        if batch_service_role_res != None:
            project_res.DependsOn.append(batch_service_role_res)

        self.template.add_resource(project_res)

        if batch_service_role_res != None:
            build_batch_policy_statements = []
            build_batch_policy_statements.append(
                Statement(Sid='BatchServiceRole',
                          Effect=Allow,
                          Action=[
                              Action('codebuild', 'StartBuild'),
                              Action('codebuild', 'StopBuild'),
                              Action('codebuild', 'RetryBuild')
                          ],
                          Resource=[troposphere.GetAtt(project_res, 'Arn')]))

            batch_policy_name = self.create_iam_resource_name(
                name_list=[self.res_name_prefix, 'CodeBuild-BatchPolicy'],
                filter_id='IAM.Policy.PolicyName')
            batch_policy_res = troposphere.iam.PolicyType(
                title='CodeBuildBuildBatchPolicy',
                template=template,
                PolicyName=batch_policy_name,
                PolicyDocument=PolicyDocument(
                    Statement=build_batch_policy_statements),
                Roles=[troposphere.Ref(batch_service_role_res)])

            batch_policy_res.DependsOn = project_res

        self.create_output(title='ProjectArn',
                           value=troposphere.GetAtt(project_res, 'Arn'),
                           description='CodeBuild Project Arn',
                           ref=config_ref + '.project.arn')

        return project_res
示例#30
0
    def __init__(
        self,
        stack,
        paco_ctx,
    ):
        super().__init__(
            stack,
            paco_ctx,
            iam_capabilities=["CAPABILITY_NAMED_IAM"],
        )
        eventsrule = stack.resource
        config_ref = eventsrule.paco_ref_parts
        self.set_aws_name('EventsRule', self.resource_group_name, self.resource_name)

        # Init a Troposphere template
        self.init_template('CloudWatch EventsRule')

        if eventsrule.is_enabled() == False:
            return

        # Parameters
        schedule_expression_param = None
        if eventsrule.schedule_expression:
            schedule_expression_param = self.create_cfn_parameter(
                param_type='String',
                name='ScheduleExpression',
                description='ScheduleExpression for the Event Rule.',
                value=eventsrule.schedule_expression,
            )
        description_param = self.create_cfn_parameter(
            param_type='String',
            name='EventDescription',
            description='Description for the Event Rule.',
            value=eventsrule.description,
        )

        # Targets
        targets = []
        self.target_params = {}
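        # For each target: create Arn/Id Parameters, an invocation role when the
        # target type needs one, and collect the Target dict for the Rule.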
        for index in range(0, len(eventsrule.targets)):
            target = eventsrule.targets[index]
            # Target Parameters
            target_name = 'Target{}'.format(index)

            # Target CFN Parameters
            self.target_params[target_name + 'Arn'] = self.create_cfn_parameter(
                param_type='String',
                name=target_name + 'Arn',
                description=target_name + ' Arn for the Events Rule.',
                value=target.target + '.arn',
            )
            self.target_params[target_name] = self.create_cfn_parameter(
                param_type='String',
                name=target_name,
                description=target_name + ' for the Event Rule.',
                value=target_name,
            )

            # Resolve the target ref to its model object. CodeBuild targets are
            # referenced by a '<...>.build.<action>.project' output ref, so the
            # trailing 'project' part is stripped to locate the action's model object.
            target_ref = Reference(target.target)
            if target_ref.parts[-1] == 'project' and target_ref.parts[-3] == 'build':
                codebuild_target_ref = f'paco.ref {".".join(target_ref.parts[:-1])}'
                target_model_obj = get_model_obj_from_ref(codebuild_target_ref, self.paco_ctx.project)
            else:
                target_model_obj = get_model_obj_from_ref(target.target, self.paco_ctx.project)

            # IAM Role Policies by Resource type
            target_policy_actions = None
            if schemas.IDeploymentPipelineBuildCodeBuild.providedBy(target_model_obj):
                # CodeBuild Project
                target_policy_actions = [awacs.codebuild.StartBuild]
            elif schemas.ILambda.providedBy(target_model_obj):
                # Lambda Function
                target_policy_actions = [awacs.awslambda.InvokeFunction]


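            # If the target type requires it, create an IAM Role that events.amazonaws.com
            # can assume to invoke the target with the actions determined above.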
            target_invocation_role_resource = None
            if target_policy_actions != None:
                # IAM Role Resources to allow Event to invoke Target
                target_invocation_role_resource = troposphere.iam.Role(
                    'TargetInvocationRole',
                    AssumeRolePolicyDocument=Policy(
                        Version='2012-10-17',
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[awacs.sts.AssumeRole],
                                Principal=Principal('Service',['events.amazonaws.com'])
                            )
                        ],
                    ),
                    Policies=[
                        troposphere.iam.Policy(
                            PolicyName="TargetInvocation",
                            PolicyDocument=Policy(
                                Version='2012-10-17',
                                Statement=[
                                    Statement(
                                        Effect=Allow,
                                        Action=target_policy_actions,
                                        Resource=[troposphere.Ref(self.target_params[target_name + 'Arn'])],
                                    )
                                ]
                            )
                        )
                    ],
                )
                self.template.add_resource(target_invocation_role_resource)

            # Create Target CFN Resources
            cfn_export_dict = {
                'Arn': troposphere.Ref(self.target_params[target_name + 'Arn']),
                'Id': troposphere.Ref(self.target_params[target_name])
            }

            if target_invocation_role_resource != None:
                cfn_export_dict['RoleArn'] = troposphere.GetAtt(target_invocation_role_resource, 'Arn')
            if target.input_json != None:
                cfn_export_dict['Input'] = target.input_json

            # Events Rule Targets
            targets.append(cfn_export_dict)

        # Events Rule Resource
        # The Name is set explicitly so that a Lambda can be created first, its ARN
        # supplied as a Parameter to this Stack, and a Lambda Permission granted
        # against the Lambda. This avoids circular dependencies.
        name = create_event_rule_name(eventsrule)
        if eventsrule.enabled_state:
            enabled_state = 'ENABLED'
        else:
            enabled_state = 'DISABLED'

        events_rule_dict = {
            'Name': name,
            'Description': troposphere.Ref(description_param),
            'Targets': targets,
            'State': enabled_state
        }

        if target_invocation_role_resource != None:
            events_rule_dict['RoleArn'] = troposphere.GetAtt(target_invocation_role_resource, 'Arn')

        if schedule_expression_param != None:
            events_rule_dict['ScheduleExpression'] = troposphere.Ref(schedule_expression_param)
        else:
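            # No schedule expression: default to an event pattern matching CodePipeline
            # pipeline executions entering the STARTED state.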
            event_pattern_yaml = """
source:
    - aws.codepipeline
detail-type:
    - 'CodePipeline Pipeline Execution State Change'
detail:
    state:
    - STARTED
"""
            events_rule_dict['EventPattern'] = yaml.load(event_pattern_yaml)

        event_rule_resource = troposphere.events.Rule.from_dict(
            'EventRule',
            events_rule_dict
        )
        if target_invocation_role_resource != None:
            event_rule_resource.DependsOn = target_invocation_role_resource
        self.template.add_resource(event_rule_resource)

        # Outputs
        self.create_output(
            title="EventRuleId",
            value=troposphere.Ref(event_rule_resource),
            ref=config_ref + '.id',
        )
        self.create_output(
            title="EventRuleArn",
            value=troposphere.GetAtt(event_rule_resource, "Arn"),
            ref=config_ref + '.arn',
        )