Example #1
 def create_lambda_invoke_properties(self, stage, action, info):
     function_arn_param = self.create_cfn_parameter(
         param_type='String',
         name=self.create_cfn_logical_id('Lambda' + stage.name +
                                         action.name),
         description='The name of the Lambda for stage {} and action {}'.
         format(stage.name, action.name),
         value=action.target_lambda + '.arn',
     )
     user_parameters_param = self.create_cfn_parameter(
         param_type='String',
         name=self.create_cfn_logical_id('UserParameters' + stage.name +
                                         action.name),
         description='The UserParameters for stage {} and action {}'.format(
             stage.name, action.name),
         value=action.user_parameters,
     )
     lambda_function_name = troposphere.Join('', [
         troposphere.Select(
             6, troposphere.Split(':', troposphere.Ref(function_arn_param)))
     ])
     return {
         'Configuration': {
             'FunctionName': lambda_function_name,
             'UserParameters': troposphere.Ref(user_parameters_param),
         },
     }
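Note: the Configuration block above derives the Lambda function name from its ARN by splitting on ':' and selecting index 6, since the CodePipeline invoke action's FunctionName setting expects a function name rather than a full ARN. A minimal illustration of that same extraction on a plain string (the ARN below is a made-up example value, not taken from the code above):

    # Illustrative only: plain-string equivalent of Select(6, Split(':', arn)).
    arn = 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
    function_name = arn.split(':')[6]
    print(function_name)  # -> my-function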
Example #2
    def __init__(
        self,
        stack,
        paco_ctx,
    ):
        super().__init__(
            stack,
            paco_ctx,
            iam_capabilities=["CAPABILITY_NAMED_IAM"],
        )
        account_ctx = stack.account_ctx
        aws_region = stack.aws_region
        self.set_aws_name('Lambda', self.resource_group_name, self.resource_name)
        awslambda = self.awslambda = self.stack.resource
        self.init_template('Lambda Function')

        # If not enabled, finish with only an empty placeholder template
        if not awslambda.is_enabled(): return

        # Parameters
        sdb_cache_param = self.create_cfn_parameter(
            name='EnableSDBCache',
            param_type='String',
            description='Boolean indicating whether an SDB Domain will be created to be used as a cache.',
            value=awslambda.sdb_cache
        )
        function_description_param = self.create_cfn_parameter(
            name='FunctionDescription',
            param_type='String',
            description='A description of the Lambda Function.',
            value=awslambda.description
        )
        handler_param = self.create_cfn_parameter(
            name='Handler',
            param_type='String',
            description='The name of the function to call upon execution.',
            value=awslambda.handler
        )
        runtime_param = self.create_cfn_parameter(
            name='Runtime',
            param_type='String',
            description='The name of the runtime language.',
            value=awslambda.runtime
        )
        role_arn_param = self.create_cfn_parameter(
            name='RoleArn',
            param_type='String',
            description='The execution role for the Lambda Function.',
            value=awslambda.iam_role.get_arn()
        )
        role_name_param = self.create_cfn_parameter(
            name='RoleName',
            param_type='String',
            description='The execution role name for the Lambda Function.',
            value=awslambda.iam_role.resolve_ref_obj.role_name
        )
        memory_size_param = self.create_cfn_parameter(
            name='MemorySize',
            param_type='Number',
            description="The amount of memory that your function has access to. Increasing the function's" + \
            " memory also increases its CPU allocation. The default value is 128 MB. The value must be a multiple of 64 MB.",
            value=awslambda.memory_size
        )
        reserved_conc_exec_param = self.create_cfn_parameter(
            name='ReservedConcurrentExecutions',
            param_type='Number',
            description='The number of simultaneous executions to reserve for the function.',
            value=awslambda.reserved_concurrent_executions
        )
        timeout_param = self.create_cfn_parameter(
            name='Timeout',
            param_type='Number',
            description='The amount of time that Lambda allows a function to run before stopping it. ',
            value=awslambda.timeout
        )
        layers_param = self.create_cfn_parameter(
            name='Layers',
            param_type='CommaDelimitedList',
            description='List of up to 5 Lambda Layer ARNs.',
            value=','.join(awslambda.layers)
        )

        # create the Lambda resource
        cfn_export_dict = {
            'Description': troposphere.Ref(function_description_param),
            'Handler': troposphere.Ref(handler_param),
            'MemorySize': troposphere.Ref(memory_size_param),
            'Runtime': troposphere.Ref(runtime_param),
            'Role': troposphere.Ref(role_arn_param),
            'Timeout': troposphere.Ref(timeout_param),
        }
        if awslambda.reserved_concurrent_executions:
            cfn_export_dict['ReservedConcurrentExecutions'] = troposphere.Ref(reserved_conc_exec_param)

        if len(awslambda.layers) > 0:
            cfn_export_dict['Layers'] = troposphere.Ref(layers_param)

        # Lambda VPC
        if awslambda.vpc_config != None:
            vpc_security_group = self.create_cfn_ref_list_param(
                name='VpcSecurityGroupIdList',
                param_type='List<AWS::EC2::SecurityGroup::Id>',
                description='VPC Security Group Id List',
                value=awslambda.vpc_config.security_groups,
                ref_attribute='id',
            )
            # Segment SubnetList is a Segment stack Output based on availability zones
            segment_ref = awslambda.vpc_config.segments[0] + '.subnet_id_list'
            subnet_list_param = self.create_cfn_parameter(
                name='VpcSubnetIdList',
                param_type='List<AWS::EC2::Subnet::Id>',
                description='VPC Subnet Id List',
                value=segment_ref
            )
            cfn_export_dict['VpcConfig'] = {
                'SecurityGroupIds': troposphere.Ref(vpc_security_group),
                'SubnetIds': troposphere.Ref(subnet_list_param),
            }

        # Code object: S3 Bucket, inline ZipFile or deploy artifact?
        if awslambda.code.s3_bucket:
            if awslambda.code.s3_bucket.startswith('paco.ref '):
                value = awslambda.code.s3_bucket + ".name"
            else:
                value = awslambda.code.s3_bucket
            s3bucket_param = self.create_cfn_parameter(
                name='CodeS3Bucket',
                description="An Amazon S3 bucket in the same AWS Region as your function. The bucket can be in a different AWS account.",
                param_type='String',
                value=value
            )
            s3key_param = self.create_cfn_parameter(
                name='CodeS3Key',
                description="The Amazon S3 key of the deployment package.",
                param_type='String',
                value=awslambda.code.s3_key
            )
            cfn_export_dict['Code'] = {
                'S3Bucket': troposphere.Ref(s3bucket_param),
                'S3Key': troposphere.Ref(s3key_param),
            }
        else:
            zip_path = Path(awslambda.code.zipfile)
            if zip_path.is_file():
                cfn_export_dict['Code'] = {
                    'ZipFile': zip_path.read_text()
                }
            elif zip_path.is_dir():
                # get the S3Bucket/S3Key; if they do not exist, create the bucket and artifact
                # and then upload the artifact
                bucket_name, artifact_name = init_lambda_code(
                    self.paco_ctx.paco_buckets,
                    self.stack.resource,
                    awslambda.code.zipfile,
                    self.stack.account_ctx,
                    self.stack.aws_region,
                )
                s3bucket_param = self.create_cfn_parameter(
                    name='CodeS3Bucket',
                    description="The Paco S3 Bucket for configuration",
                    param_type='String',
                    value=bucket_name
                )
                s3key_param = self.create_cfn_parameter(
                    name='CodeS3Key',
                    description="The Lambda code artifact S3 Key.",
                    param_type='String',
                    value=artifact_name
                )
                cfn_export_dict['Code'] = {
                    'S3Bucket': troposphere.Ref(s3bucket_param),
                    'S3Key': troposphere.Ref(s3key_param),
                }

        # Environment variables
        var_export = {}
        if awslambda.environment != None and awslambda.environment.variables != None:
            for var in awslambda.environment.variables:
                name = var.key.replace('_','')
                env_param = self.create_cfn_parameter(
                    name='EnvVar{}'.format(name),
                    param_type='String',
                    description='Env var for {}'.format(name),
                    value=var.value,
                )
                var_export[var.key] = troposphere.Ref(env_param)
            if awslambda.sdb_cache == True:
                var_export['SDB_CACHE_DOMAIN'] = troposphere.Ref('LambdaSDBCacheDomain')
            if len(awslambda.log_group_names) > 0:
                # Add PACO_LOG_GROUPS Environment Variable
                paco_log_groups = [
                    prefixed_name(awslambda, loggroup_name, self.paco_ctx.legacy_flag)
                    for loggroup_name in awslambda.log_group_names
                ]
                paco_log_groups_param = self.create_cfn_parameter(
                    name='EnvVariablePacoLogGroups',
                    param_type='String',
                    description='Env var for Paco Log Groups',
                    value=','.join(paco_log_groups),
                )
                var_export['PACO_LOG_GROUPS'] = troposphere.Ref(paco_log_groups_param)
        cfn_export_dict['Environment'] = { 'Variables': var_export }

        # Lambda resource
        self.awslambda_resource = troposphere.awslambda.Function.from_dict(
            'Function',
            cfn_export_dict
        )
        self.template.add_resource(self.awslambda_resource)

        # SDB Cache with SDB Domain and SDB Domain Policy resources
        if awslambda.sdb_cache == True:
            sdb_domain_resource = troposphere.sdb.Domain(
                title='LambdaSDBCacheDomain',
                template=self.template,
                Description="Lambda Function Domain"
            )
            sdb_policy = troposphere.iam.Policy(
                title='LambdaSDBCacheDomainPolicy',
                template=self.template,
                PolicyName='SDBDomain',
                PolicyDocument=Policy(
                    Version='2012-10-17',
                    Statement=[
                        Statement(
                            Effect=Allow,
                            Action=[Action("sdb","*")],
                            Resource=[
                                troposphere.Sub(
                                    'arn:aws:sdb:${AWS::Region}:${AWS::AccountId}:domain/${DomainName}',
                                    DomainName=troposphere.Ref('LambdaSDBCacheDomain')
                                )
                            ],
                        )
                    ],
                ),
                Roles=[troposphere.Ref(role_name_param)],
            )
            sdb_policy.DependsOn = sdb_domain_resource
            self.awslambda_resource.DependsOn = sdb_domain_resource

        # Permissions
        # SNS Topic Lambda permissions and subscription
        idx = 1
        for sns_topic_ref in awslambda.sns_topics:
            # SNS Topic Arn parameters
            param_name = 'SNSTopicArn%d' % idx
            self.create_cfn_parameter(
                name=param_name,
                param_type='String',
                description='An SNS Topic ARN to grant permission to.',
                value=sns_topic_ref + '.arn'
            )

            # Lambda permission
            troposphere.awslambda.Permission(
                title=param_name + 'Permission',
                template=self.template,
                Action="lambda:InvokeFunction",
                FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                Principal='sns.amazonaws.com',
                SourceArn=troposphere.Ref(param_name),
            )

            # SNS Topic subscription
            sns_topic = get_model_obj_from_ref(sns_topic_ref, self.paco_ctx.project)
            troposphere.sns.SubscriptionResource(
                title=param_name + 'Subscription',
                template=self.template,
                Endpoint=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                Protocol='lambda',
                TopicArn=troposphere.Ref(param_name),
                Region=sns_topic.region_name
            )
            idx += 1


        # Lambda permissions for connected Paco resources

        app = get_parent_by_interface(awslambda, schemas.IApplication)
        for obj in get_all_nodes(app):
            # S3 Bucket notification permission(s)
            if schemas.IS3Bucket.providedBy(obj):
                seen = {}
                if hasattr(obj, 'notifications'):
                    if hasattr(obj.notifications, 'lambdas'):
                        for lambda_notif in obj.notifications.lambdas:
                            if lambda_notif.function == awslambda.paco_ref:
                                # yes, this Lambda gets notification from this S3Bucket
                                group = get_parent_by_interface(obj, schemas.IResourceGroup)
                                s3_logical_name = self.gen_cf_logical_name(group.name + obj.name, '_')
                                if s3_logical_name not in seen:
                                    troposphere.awslambda.Permission(
                                        title='S3Bucket' + s3_logical_name,
                                        template=self.template,
                                        Action="lambda:InvokeFunction",
                                        FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                                        Principal='s3.amazonaws.com',
                                        SourceArn='arn:aws:s3:::' + obj.get_bucket_name(),
                                    )
                                    seen[s3_logical_name] = True

            # Events Rule permission(s)
            if schemas.IEventsRule.providedBy(obj):
                seen = {}
                for target in obj.targets:
                    target_ref = Reference(target.target)
                    target_ref.set_account_name(account_ctx.get_name())
                    target_ref.set_region(aws_region)
                    lambda_ref = Reference(awslambda.paco_ref)

                    if target_ref.raw == lambda_ref.raw:
                        # yes, the Events Rule has a Target that is this Lambda
                        group = get_parent_by_interface(obj, schemas.IResourceGroup)
                        eventsrule_logical_name = self.gen_cf_logical_name(group.name + obj.name, '_')
                        if eventsrule_logical_name not in seen:
                            rule_name = create_event_rule_name(obj)
                            # rule_name = self.create_cfn_logical_id("EventsRule" + obj.paco_ref)
                            # rule_name = hash_smaller(rule_name, 64)
                            source_arn = 'arn:aws:events:{}:{}:rule/{}'.format(
                                aws_region,
                                account_ctx.id,
                                rule_name
                            )
                            troposphere.awslambda.Permission(
                                title='EventsRule' + eventsrule_logical_name,
                                template=self.template,
                                Action="lambda:InvokeFunction",
                                FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                                Principal='events.amazonaws.com',
                                SourceArn=source_arn,
                            )
                            seen[eventsrule_logical_name] = True

            # IoT Analytics permission(s)
            if schemas.IIoTAnalyticsPipeline.providedBy(obj):
                seen = {}
                for activity in obj.pipeline_activities.values():
                    if activity.activity_type == 'lambda':
                        target_ref = Reference(activity.function)
                        target_ref.set_account_name(account_ctx.get_name())
                        target_ref.set_region(aws_region)
                        lambda_ref = Reference(awslambda.paco_ref)
                        if target_ref.raw == lambda_ref.raw:
                            # yes, the IoT Analytics Lambda Activity has a ref to this Lambda
                            group = get_parent_by_interface(obj, schemas.IResourceGroup)
                            iotap_logical_name = self.gen_cf_logical_name(group.name + obj.name, '_')
                            if iotap_logical_name not in seen:
                                rule_name = create_event_rule_name(obj)
                                troposphere.awslambda.Permission(
                                    title='IoTAnalyticsPipeline' + iotap_logical_name,
                                    template=self.template,
                                    Action="lambda:InvokeFunction",
                                    FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                                    Principal='iotanalytics.amazonaws.com',
                                )
                                seen[iotap_logical_name] = True

        # Log group(s)
        loggroup_function_name = troposphere.Join(
            '', [
                '/aws/lambda/',
                troposphere.Select(
                    6, troposphere.Split(':', troposphere.GetAtt(self.awslambda_resource, 'Arn'))
                )
            ]
        )
        loggroup_resources = []
        loggroup_resources.append(
            self.add_log_group(loggroup_function_name, 'lambda')
        )
        if len(awslambda.log_group_names) > 0:
            # Additional App-specific LogGroups
            for loggroup_name in awslambda.log_group_names:
                # Add LogGroup to the template
                prefixed_loggroup_name = prefixed_name(awslambda, loggroup_name, self.paco_ctx.legacy_flag)
                loggroup_resources.append(
                    self.add_log_group(prefixed_loggroup_name)
                )

        # LogGroup permissions
        log_group_arns = [
            troposphere.Join(':', [
                f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group',
                loggroup_function_name,
                '*'
            ])
        ]
        log_stream_arns = [
            troposphere.Join(':', [
                f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group',
                loggroup_function_name,
                'log-stream',
                '*'
            ])
        ]
        for loggroup_name in awslambda.log_group_names:
            prefixed_loggroup_name = prefixed_name(awslambda, loggroup_name, self.paco_ctx.legacy_flag)
            log_group_arns.append(
                f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group:{prefixed_loggroup_name}:*'
            )
            log_stream_arns.append(
                f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group:{prefixed_loggroup_name}:log-stream:*'
            )

        loggroup_policy_resource = troposphere.iam.ManagedPolicy(
            title='LogGroupManagedPolicy',
            PolicyDocument=Policy(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Sid='AllowLambdaModifyLogStreams',
                        Effect=Allow,
                        Action=[
                            Action("logs","CreateLogStream"),
                            Action("logs","DescribeLogStreams"),
                        ],
                        Resource=log_group_arns,
                    ),
                    Statement(
                        Sid='AllowLambdaPutLogEvents',
                        Effect=Allow,
                        Action=[
                            Action("logs","PutLogEvents"),
                        ],
                        Resource=log_stream_arns,
                    ),
                ],
            ),
            Roles=[troposphere.Ref(role_name_param)],
        )
        loggroup_policy_resource.DependsOn = loggroup_resources
        self.template.add_resource(loggroup_policy_resource)

        # Outputs
        self.create_output(
            title='FunctionName',
            value=troposphere.Ref(self.awslambda_resource),
            ref=awslambda.paco_ref_parts + '.name',
        )
        self.create_output(
            title='FunctionArn',
            value=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
            ref=awslambda.paco_ref_parts + '.arn',
        )
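Note: Example #2 assembles the Function's Properties as a plain dict (cfn_export_dict) and converts it into a troposphere resource with Function.from_dict. A minimal, standalone sketch of that pattern, with hypothetical property values that are not taken from the example above:

    import troposphere
    import troposphere.awslambda

    template = troposphere.Template()
    props = {
        'Handler': 'index.handler',
        'Runtime': 'python3.9',
        'Role': 'arn:aws:iam::123456789012:role/example-role',  # hypothetical role ARN
        'Code': {'ZipFile': 'def handler(event, context):\n    return "ok"\n'},
    }
    # from_dict builds the nested Code property object and validates the keys
    function_resource = troposphere.awslambda.Function.from_dict('Function', props)
    template.add_resource(function_resource)
    print(template.to_json())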
Example #3
 Volumes=[troposphere.ecs.Volume(Name="data")],
 ContainerDefinitions=[
     troposphere.ecs.ContainerDefinition(
         Name="s3bundler",
         Cpu=170,
         MemoryReservation=128,
         Image=troposphere.Ref(S3BundlerURL),
         Environment=[
             troposphere.ecs.Environment(Name="AWS_DEFAULT_REGION",
                                         Value=troposphere.Ref(
                                             troposphere.AWS_REGION)),
             troposphere.ecs.Environment(Name="TMP", Value="/mnt")
         ],
         Command=[
             troposphere.If("CompressTrue", "-c", ""), "-q",
             troposphere.Select("5",
                                troposphere.Split(":", QueueChoice)),
             "-b", ArchiveS3Choice, "-p",
             troposphere.Ref(S3ArchivePrefix), "-s",
             troposphere.Ref(MaxSize)
         ],
         DockerLabels={"Project": "S3Bundler"},
         LogConfiguration=troposphere.ecs.LogConfiguration(
             LogDriver="awslogs",
             Options={
                 "awslogs-group": troposphere.Ref(s3bundlerlogs),
                 "awslogs-region":
                 troposphere.Ref(troposphere.AWS_REGION)
             }),
         Essential=True,
         DisableNetworking=False,
         ReadonlyRootFilesystem=False,
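Note: this fragment (truncated by the example listing) is a ContainerDefinition inside an ECS TaskDefinition. The Select("5", Split(":", QueueChoice)) call pulls the queue name out of an SQS queue ARN, because the name is the sixth colon-separated field. The plain-string equivalent, using a made-up ARN:

    # Illustrative only: string-level equivalent of Select("5", Split(":", queue_arn)).
    queue_arn = 'arn:aws:sqs:us-east-1:123456789012:my-queue'
    queue_name = queue_arn.split(':')[5]
    print(queue_name)  # -> my-queue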
Example #4
    def __init__(self, stack, paco_ctx, role):
        super().__init__(stack, paco_ctx)
        self.set_aws_name('IoTAnalyticsPipeline', self.resource_group_name,
                          self.resource_name)
        iotap = self.resource

        # Init Troposphere template
        self.init_template('IoT Analytics pipeline')
        if not iotap.is_enabled():
            return

        # Role ARN for IoT
        role_arn_param = self.create_cfn_parameter(
            param_type='String',
            name='IoTRoleArn',
            description='IoT Topic Rule Service Role ARN',
            value=role.get_arn(),
        )

        # Channel Resource
        iotchannel_logical_id = 'IoTAnalyticsChannel'
        cfn_export_dict = {}
        if iotap.channel_storage.bucket == None:
            channel_storage_dict = {'ServiceManagedS3': {}}
            cfn_export_dict['RetentionPeriod'] = convert_expire_to_cfn_dict(
                iotap.channel_storage.expire_events_after_days)
        else:
            channel_bucket_param = self.create_cfn_parameter(
                param_type='String',
                name='IoTAnalyticsChannelBucketName',
                description='IoT Analytics Channel storage bucket name',
                value=iotap.channel_storage.bucket + '.name',
            )
            channel_storage_dict = {
                'CustomerManagedS3': {
                    'Bucket': troposphere.Ref(channel_bucket_param),
                    'KeyPrefix': iotap.channel_storage.key_prefix,
                    'RoleArn': troposphere.Ref(role_arn_param),
                }
            }
        cfn_export_dict['ChannelStorage'] = channel_storage_dict
        iot_channel_resource = troposphere.iotanalytics.Channel.from_dict(
            iotchannel_logical_id, cfn_export_dict)
        self.template.add_resource(iot_channel_resource)

        self.create_output(
            title='ChannelName',
            description='IoT Analytics Channel name',
            value=troposphere.Ref(iot_channel_resource),
            ref=self.resource.paco_ref_parts + '.channel.name',
        )

        # Datastore Resource
        iotchannel_logical_id = 'IoTAnalyticsDatastore'
        cfn_export_dict = {}
        if iotap.datastore_storage.bucket == None:
            datastore_storage_dict = {'ServiceManagedS3': {}}
            cfn_export_dict['RetentionPeriod'] = convert_expire_to_cfn_dict(
                iotap.datastore_storage.expire_events_after_days)
        else:
            datastore_bucket_param = self.create_cfn_parameter(
                param_type='String',
                name='IoTAnalyticsDatastoreBucketName',
                description='IoT Analytics Datastore storage bucket name',
                value=iotap.datastore_storage.bucket + '.name',
            )
            datastore_storage_dict = {
                'CustomerManagedS3': {
                    'Bucket': troposphere.Ref(datastore_bucket_param),
                    'KeyPrefix': iotap.datastore_storage.key_prefix,
                    'RoleArn': troposphere.Ref(role_arn_param),
                }
            }

        cfn_export_dict['DatastoreStorage'] = datastore_storage_dict
        if iotap.datastore_name != None:
            cfn_export_dict['DatastoreName'] = iotap.datastore_name
        iotap_datastore_resource = troposphere.iotanalytics.Datastore.from_dict(
            iotchannel_logical_id, cfn_export_dict)
        iotap_datastore_resource.DependsOn = iot_channel_resource
        self.template.add_resource(iotap_datastore_resource)

        self.create_output(
            title='DatastoreName',
            description='IoT Analytics Datastore name',
            value=troposphere.Ref(iotap_datastore_resource),
            ref=self.resource.paco_ref_parts + '.datastore.name',
        )

        # Pipeline Resource
        iotpipeline_logical_id = 'IoTAnalyticsPipeline'
        cfn_export_dict = {}
        cfn_export_dict['PipelineActivities'] = []
        idx = 0
        activity_list = list(iotap.pipeline_activities.values())

        # start with a Channel activity
        if len(activity_list) == 0:
            next_name = "DatastoreActivity"
        else:
            next_name = activity_list[idx].name + "Activity"
        cfn_export_dict['PipelineActivities'].append({
            'Channel': {
                'Name': "ChannelActivity",
                'ChannelName': troposphere.Ref(iot_channel_resource),
                'Next': next_name,
            }
        })

        for activity in iotap.pipeline_activities.values():
            if len(activity_list) == idx + 1:
                next_name = 'DatastoreActivity'
            else:
                next_name = activity_list[idx + 1].name + "Activity"
            if activity.activity_type == 'lambda':
                lambda_param = self.create_cfn_parameter(
                    param_type='String',
                    name=f'LambdaFunction{idx}',
                    description=f'IoT Analytics Lambda for Activity {idx}',
                    value=activity.function + '.arn',
                )
                if not activity.batch_size:
                    activity.batch_size = 1
                activity_dict = {
                    'Lambda': {
                        'LambdaName':
                        troposphere.Join('', [
                            '',
                            troposphere.Select(
                                6,
                                troposphere.Split(
                                    ':', troposphere.Ref(lambda_param)))
                        ]),
                        'BatchSize':
                        activity.batch_size,
                        'Name':
                        activity.name + "Activity",
                        'Next':
                        next_name,
                    }
                }
            elif activity.activity_type == 'add_attributes':
                activity_dict = {
                    'AddAttributes': {
                        'Name': activity.name + "Activity",
                        'Attributes': activity.attributes,
                        'Next': next_name,
                    }
                }
            elif activity.activity_type == 'remove_attributes':
                activity_dict = {
                    'RemoveAttributes': {
                        'Name': activity.name + "Activity",
                        'Attributes': activity.attribute_list,
                        'Next': next_name,
                    }
                }
            elif activity.activity_type == 'select_attributes':
                activity_dict = {
                    'SelectAttributes': {
                        'Name': activity.name + "Activity",
                        'Attributes': activity.attribute_list,
                        'Next': next_name,
                    }
                }
            elif activity.activity_type == 'filter':
                activity_dict = {
                    'Filter': {
                        'Name': activity.name + "Activity",
                        'Filter': activity.filter,
                        'Next': next_name,
                    }
                }
            elif activity.activity_type == 'math':
                activity_dict = {
                    'Math': {
                        'Name': activity.name + "Activity",
                        'Attribute': activity.attribute,
                        'Math': activity.math,
                        'Next': next_name,
                    }
                }
            elif activity.activity_type == 'device_registry_enrich':
                activity_dict = {
                    'DeviceRegistryEnrich': {
                        'Name': activity.name + "Activity",
                        'Attribute': activity.attribute,
                        'ThingName': activity.thing_name,
                        'Next': next_name,
                    }
                }
            elif activity.activity_type == 'device_shadow_enrich':
                activity_dict = {
                    'DeviceShadowEnrich': {
                        'Name': activity.name + "Activity",
                        'Attribute': activity.attribute,
                        'ThingName': activity.thing_name,
                        'Next': next_name,
                    }
                }

            cfn_export_dict['PipelineActivities'].append(activity_dict)
            idx += 1

        # finish with a Datastore activity
        cfn_export_dict['PipelineActivities'].append({
            'Datastore': {
                'Name': "DatastoreActivity",
                'DatastoreName': troposphere.Ref(iotap_datastore_resource),
            }
        })

        iotpipeline_resource = troposphere.iotanalytics.Pipeline.from_dict(
            iotpipeline_logical_id,
            cfn_export_dict,
        )
        iotpipeline_resource.DependsOn = [
            iot_channel_resource, iotap_datastore_resource
        ]
        self.template.add_resource(iotpipeline_resource)

        self.create_output(
            title='PipelineName',
            description='IoT Analytics Pipeline name',
            value=troposphere.Ref(iotpipeline_resource),
            ref=self.resource.paco_ref_parts + '.pipeline.name',
        )

        # Datasets
        for dataset in iotap.datasets.values():
            iotdataset_logical_id = self.create_cfn_logical_id(
                f'IoTDataset{dataset.name}')
            cfn_export_dict = {}
            cfn_export_dict['Actions'] = []
            if dataset.query_action != None:
                cfn_export_dict['Actions'].append({
                    'ActionName': dataset.name,
                    'QueryAction': {
                        'Filters': dataset.query_action.filters,
                        'SqlQuery': dataset.query_action.sql_query,
                    }
                })
            else:
                # ToDo: container_action
                pass
            cfn_export_dict['ContentDeliveryRules'] = []
            for delivery_rule in dataset.content_delivery_rules.values():
                delivery_dict = {
                    'Destination': {},
                    #    'EntryName': delivery_rule.name,
                }
                if delivery_rule.s3_destination != None:
                    bucket = get_model_obj_from_ref(
                        delivery_rule.s3_destination.bucket,
                        self.paco_ctx.project)
                    delivery_dict['Destination'][
                        'S3DestinationConfiguration'] = {
                            'Bucket': bucket.get_aws_name(),
                            'Key': delivery_rule.s3_destination.key,
                            'RoleArn': troposphere.Ref(role_arn_param),
                        }
                cfn_export_dict['ContentDeliveryRules'].append(delivery_dict)

            cfn_export_dict['RetentionPeriod'] = convert_expire_to_cfn_dict(
                dataset.expire_events_after_days)
            if dataset.version_history != None:
                if dataset.version_history == 0:
                    cfn_export_dict['VersioningConfiguration'] = {
                        'Unlimited': True
                    }
                else:
                    cfn_export_dict['VersioningConfiguration'] = {
                        'MaxVersions': dataset.version_history,
                        'Unlimited': False
                    }

            iot_dataset_resource = troposphere.iotanalytics.Dataset.from_dict(
                iotdataset_logical_id, cfn_export_dict)
            iot_dataset_resource.DependsOn = iotap_datastore_resource
            self.template.add_resource(iot_dataset_resource)

            self.create_output(
                title=f'{dataset.name}DatasetName',
                description=f'IoT Analytics Dataset {dataset.name}',
                value=troposphere.Ref(iot_dataset_resource),
                ref=self.resource.paco_ref_parts + '.dataset.' + dataset.name +
                '.name',
            )
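Note: the pipeline loop above links each activity to the following one through its 'Next' key, bracketed by a Channel activity at the start and a Datastore activity at the end. For a hypothetical pipeline with a single 'filter' activity, the generated PipelineActivities list would look roughly like this (names and the filter expression are invented for illustration; the real template uses troposphere.Ref values for the channel and datastore names):

    pipeline_activities = [
        {'Channel': {'Name': 'ChannelActivity',
                     'ChannelName': 'example-channel',
                     'Next': 'temperatureActivity'}},
        {'Filter': {'Name': 'temperatureActivity',
                    'Filter': 'temperature > 20',
                    'Next': 'DatastoreActivity'}},
        {'Datastore': {'Name': 'DatastoreActivity',
                       'DatastoreName': 'example-datastore'}},
    ]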
Example #5
    def __init__(self, stack, paco_ctx, task_execution_role):
        ecs_config = stack.resource
        super().__init__(stack, paco_ctx)
        self.set_aws_name('ECS Services', self.resource_group_name, self.resource.name)

        self.init_template('Elastic Container Service (ECS) Services and TaskDefinitions')
        if not ecs_config.is_enabled(): return

        # Task Execution Role
        task_execution_role_param = self.create_cfn_parameter(
            name='TaskExecutionRole',
            param_type='String',
            description='Task Execution Role',
            value=task_execution_role.get_arn(),
        )

        # TaskDefinitions
        for task in ecs_config.task_definitions.values():
            task_dict = task.cfn_export_dict
            task_dict['ExecutionRoleArn'] = troposphere.Ref(task_execution_role_param)

            index = 0
            task._depends_on = []
            for container_definition in task.container_definitions.values():
                # ContainerDefinition Environment variables
                for env_pair in container_definition.environment:
                    key = env_pair.name
                    value = env_pair.value
                    # only paco refs are passed as Parameters to avoid tripping the 60 Parameter CloudFormation limit
                    if references.is_ref(value):
                        if type(value) == type(str()):
                            param_type = 'String'
                        elif type(value) == type(int()) or type(value) == type(float()):
                            param_type = 'Number'
                        else:
                            raise UnsupportedCloudFormationParameterType(
                                "Can not cast {} of type {} to a CloudFormation Parameter type.".format(
                                    value, type(value)
                                )
                            )
                        param_name = self.create_cfn_logical_id(f'{task.name}{container_definition.name}{key}')
                        environment_param = self.create_cfn_parameter(
                            param_type=param_type,
                            name=param_name,
                            description=f'Environment variable for container definition {container_definition.name} for task definition {task.name}',
                            value=value,
                        )
                        value = troposphere.Ref(environment_param)
                    if 'Environment' not in task_dict['ContainerDefinitions'][index]:
                        task_dict['ContainerDefinitions'][index]['Environment'] = []
                    task_dict['ContainerDefinitions'][index]['Environment'].append({'Name': key, 'Value': value})

                # Image can be a paco.ref to an ECR Repository
                if references.is_ref(container_definition.image):
                    param_name = self.create_cfn_logical_id(f'{task.name}{container_definition.name}Image')
                    image_arn_param = self.create_cfn_parameter(
                        param_type='String',
                        name=param_name,
                        description=f'Image used to start the container.',
                        value=container_definition.image + '.arn',
                    )
                    # The ECR URL needs to be re-assembled from the ARN, as the URL is not provided as a Stack Output :(
                    task_dict['ContainerDefinitions'][index]['Image'] = troposphere.Join(
                        ':', [
                            troposphere.Join(
                                '/', [
                                    # domain portion: aws_account_id.dkr.ecr.region.amazonaws.com
                                    troposphere.Join(
                                        '.', [
                                            troposphere.Select(4, troposphere.Split(':', troposphere.Ref(image_arn_param))), # account id
                                            'dkr',
                                            'ecr',
                                            troposphere.Select(3, troposphere.Split(':', troposphere.Ref(image_arn_param))), # region
                                            'amazonaws',
                                            'com',
                                        ]
                                    ),
                                    troposphere.Select(1, troposphere.Split('/', troposphere.Ref(image_arn_param))) # ecr-repo-name
                                ]
                            ),
                            container_definition.image_tag # image tag
                        ]
                    )
                else:
                    task_dict['ContainerDefinitions'][index]['Image'] = container_definition.image

                if getattr(container_definition, 'logging') != None:
                    task_dict['ContainerDefinitions'][index]['LogConfiguration'] = {}
                    log_dict = task_dict['ContainerDefinitions'][index]['LogConfiguration']
                    log_dict['LogDriver'] = container_definition.logging.driver
                    # Only awslogs supported for now
                    if container_definition.logging.driver == 'awslogs':
                        log_dict['Options'] = {}
                        log_dict['Options']['awslogs-region'] = troposphere.Ref('AWS::Region')
                        prefixed_log_group_name = prefixed_name(container_definition, task.name)
                        log_group_resource = self.add_log_group(prefixed_log_group_name, container_definition.logging.expire_events_after_days)
                        log_dict['Options']['awslogs-group'] = troposphere.Ref(log_group_resource)
                        task._depends_on.append(log_group_resource)
                        log_dict['Options']['awslogs-stream-prefix'] = container_definition.name
                index += 1

            # Setup Secrets
            for task_dict_container_def in task_dict['ContainerDefinitions']:
                if 'Secrets' in task_dict_container_def:
                    for secrets_pair in task_dict_container_def['Secrets']:
                        # Secrets ARN Parameters
                        name_hash = md5sum(str_data=secrets_pair['ValueFrom'])
                        secret_param_name = 'TaskDefinitionSecretArn'+name_hash
                        secret_param = self.create_cfn_parameter(
                            param_type='String',
                            name=secret_param_name,
                            description='The ARN of the Secrets Manager Secret.',
                            value=secrets_pair['ValueFrom']+'.arn'
                        )
                        secrets_pair['ValueFrom'] = '!ManualTroposphereRef '+secret_param_name

            task_res = troposphere.ecs.TaskDefinition.from_dict(
                self.create_cfn_logical_id('TaskDefinition' + task.name),
                task_dict,
            )
            task_res.DependsOn = task._depends_on
            self.template.add_resource(task_res)
            task._troposphere_res = task_res

        # Cluster Param
        cluster_param = self.create_cfn_parameter(
            name='Cluster',
            param_type='String',
            description='Cluster Name',
            value=ecs_config.cluster + '.name',
        )

        #  Services
        # ToDo: allow multiple PrivateDnsNamespaces?
        # e.g. if multiple ECSServices want to participate in the same PrivateDnsNamespace?
        if ecs_config.service_discovery_namespace_name != '':
            private_dns_vpc_param = self.create_cfn_parameter(
                param_type='String',
                name='PrivateDnsNamespaceVpc',
                description='The Vpc for the Service Discovery Private DNS Namespace.',
                value='paco.ref ' + '.'.join(ecs_config.paco_ref_parts.split('.')[:4]) + '.network.vpc.id'
            )
            private_dns_namespace_res = troposphere.servicediscovery.PrivateDnsNamespace(
                title=self.create_cfn_logical_id(f'DiscoveryService{ecs_config.service_discovery_namespace_name}'),
                Name=ecs_config.service_discovery_namespace_name,
                Vpc=troposphere.Ref(private_dns_vpc_param),
            )
            self.template.add_resource(private_dns_namespace_res)
        for service in ecs_config.services.values():
            service_dict = service.cfn_export_dict

            # Service Discovery
            if service.hostname != None:
                service_discovery_res = troposphere.servicediscovery.Service(
                    title=self.create_cfn_logical_id(f'DiscoveryService{service.name}'),
                    DnsConfig=troposphere.servicediscovery.DnsConfig(
                        DnsRecords=[
                            # troposphere.servicediscovery.DnsRecord(
                            #     TTL='60',
                            #     Type='A'
                            # ),
                            troposphere.servicediscovery.DnsRecord(
                                TTL='60',
                                Type='SRV'
                            )
                        ]
                    ),
                    HealthCheckCustomConfig=troposphere.servicediscovery.HealthCheckCustomConfig(FailureThreshold=float(1)),
                    NamespaceId=troposphere.Ref(private_dns_namespace_res),
                    Name=service.name,
                )
                service_discovery_res.DependsOn = [private_dns_namespace_res]
                self.template.add_resource(service_discovery_res)
                service_dict['ServiceRegistries'] = []
                for load_balancer in service.load_balancers:
                    service_registry_dict = {
                        'RegistryArn': troposphere.GetAtt(service_discovery_res, 'Arn'),
                        'ContainerName': load_balancer.container_name,
                        'ContainerPort': load_balancer.container_port,
                    }
                    # ToDo: add Port when needed ... 'Port': ?,
                    service_dict['ServiceRegistries'].append(service_registry_dict)

            # convert TargetGroup ref to a Parameter
            lb_idx = 0
            if 'LoadBalancers' in service_dict:
                for lb in service_dict['LoadBalancers']:
                    target_group_ref = lb['TargetGroupArn']
                    tg_param = self.create_cfn_parameter(
                        name=self.create_cfn_logical_id(f'TargetGroup{service.name}{lb_idx}'),
                        param_type='String',
                        description='Target Group ARN',
                        value=target_group_ref + '.arn',
                    )
                    lb['TargetGroupArn'] = troposphere.Ref(tg_param)
                    lb_idx += 1

            # Replace TaskDefinition name with a TaskDefinition ARN
            if 'TaskDefinition' in service_dict:
                service_dict['TaskDefinition'] = troposphere.Ref(
                    ecs_config.task_definitions[service_dict['TaskDefinition']]._troposphere_res
                )
            service_dict['Cluster'] = troposphere.Ref(cluster_param)
            service_res = troposphere.ecs.Service.from_dict(
                self.create_cfn_logical_id('Service' + service.name),
                service_dict
            )

            # Outputs
            self.create_output(
                title=service_res.title + 'Name',
                description="Service Name",
                value=troposphere.GetAtt(service_res, 'Name'),
                ref=service.paco_ref_parts + ".name"
            )

            self.template.add_resource(service_res)
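Note: Example #5 re-assembles the ECR image URL from the repository ARN because the registry URL is not exposed as a Stack Output. A string-level sketch of the nested Join/Select/Split calls, with a made-up ARN and tag:

    # Illustrative only: plain-string equivalent of the ARN-to-URL re-assembly above.
    ecr_arn = 'arn:aws:ecr:us-east-1:123456789012:repository/my-repo'
    parts = ecr_arn.split(':')
    region, account_id = parts[3], parts[4]      # Select(3, ...) and Select(4, ...)
    repo_name = ecr_arn.split('/')[1]            # Select(1, Split('/', ...))
    image_tag = 'latest'
    image_url = f'{account_id}.dkr.ecr.{region}.amazonaws.com/{repo_name}:{image_tag}'
    print(image_url)  # -> 123456789012.dkr.ecr.us-east-1.amazonaws.com/my-repo:latest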