Example #1
    def _create_lambdas(self):
        clean_pycache()

        for root, dirs, files in os.walk(LAMBDAS_DIR):
            for f in files:
                if f != "__init__.py":
                    continue

                parent_folder = os.path.basename(os.path.dirname(root))
                lambda_folder = os.path.basename(root)
                name = f"{parent_folder}-{lambda_folder}"
                lambda_config = self.lambdas_config[name]

                layers = []
                for layer_name in lambda_config["layers"]:
                    layers.append(self.layers[layer_name])

                lambda_role = Role(
                    self,
                    f"{name}_role",
                    assumed_by=ServicePrincipal(service="lambda.amazonaws.com")
                )
                for policy in lambda_config["policies"]:
                    lambda_role.add_to_policy(policy)
                lambda_role.add_managed_policy(
                    ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"))

                lambda_args = {
                    "code": Code.from_asset(root),
                    "handler": "__init__.handle",
                    "runtime": Runtime.PYTHON_3_8,
                    "layers": layers,
                    "function_name": name,
                    "environment": lambda_config["variables"],
                    "role": lambda_role,
                    "timeout": Duration.seconds(lambda_config["timeout"]),
                    "memory_size": lambda_config["memory"],
                }
                if "concurrent_executions" in lambda_config:
                    lambda_args["reserved_concurrent_executions"] = lambda_config["concurrent_executions"]

                self.lambdas[name] = Function(self, name, **lambda_args)

        self.lambdas["sqs_handlers-post_anime"].add_event_source(SqsEventSource(self.post_anime_queue))

        Rule(
            self,
            "titles_updater",
            schedule=Schedule.cron(hour="2", minute="10"),
            targets=[LambdaFunction(self.lambdas["crons-titles_updater"])]
        )
        Rule(
            self,
            "episodes_updater",
            schedule=Schedule.cron(hour="4", minute="10"),
            targets=[LambdaFunction(self.lambdas["crons-episodes_updater"])]
        )
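A sketch of the imports the snippet above presumably relies on (CDK v1 module layout; LAMBDAS_DIR, clean_pycache and the config dictionaries are assumed to be defined elsewhere in the project):

import os

from aws_cdk.core import Duration
from aws_cdk.aws_events import Rule, Schedule
from aws_cdk.aws_events_targets import LambdaFunction
from aws_cdk.aws_iam import ManagedPolicy, Role, ServicePrincipal
from aws_cdk.aws_lambda import Code, Function, Runtime
from aws_cdk.aws_lambda_event_sources import SqsEventSource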
Example #2
class Stack(core.Stack):
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.event_bus = EventBus(scope=self,
                                  id='CustomEventBus',
                                  event_bus_name='CustomEventBus')

        self.source = Function(
            scope=self,
            id='SourceFunction',
            function_name='SourceFunction',
            code=Code.from_asset(path='./code_source/'),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_6,
        )

        self.source.add_to_role_policy(statement=PolicyStatement(
            actions=['events:PutEvents'],
            resources=[self.event_bus.event_bus_arn]))
        """
        Define rule.
        """

        self.rule = Rule(
            scope=self,
            id='EventBusRule',
            description='Sample description.',
            enabled=True,
            event_bus=self.event_bus,
            event_pattern=EventPattern(detail={
                'Domain': ["MedInfo"],
                'Reason': ["InvokeTarget"]
            }),
            rule_name='EventBusRule',
        )
        """
        Add target.
        """

        self.target_function = Function(
            scope=self,
            id='TargetFunction',
            function_name='TargetFunction',
            code=Code.from_asset(path='./code_target/'),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_6,
        )

        # Wrap the Lambda in a rule target (reusing self.target for both the
        # Function and the target would shadow the first assignment).
        self.target: IRuleTarget = LambdaFunction(handler=self.target_function)
        self.rule.add_target(target=self.target)
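For the rule above to fire, the source function has to publish events whose detail carries the matched fields. A minimal sketch of such a handler, assuming boto3 and the bus name defined above (the Source and DetailType values are hypothetical; the pattern does not filter on them):

import json
import boto3

events = boto3.client('events')

def handler(event, context):
    # Publish an event matching EventPattern(detail={'Domain': ..., 'Reason': ...}).
    events.put_events(Entries=[{
        'EventBusName': 'CustomEventBus',
        'Source': 'custom.source',
        'DetailType': 'SampleDetailType',
        'Detail': json.dumps({'Domain': 'MedInfo', 'Reason': 'InvokeTarget'}),
    }])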
Example #3
    def __init__(self, app: App, id: str, txt: str, env: dict,
                 policies: PolicyStatement, domain: str,
                 hosted_zone_id: str) -> None:
        super().__init__(app, id)
        env['HOSTED_ZONE_ID'] = hosted_zone_id

        self.function = SingletonFunction(self,
                                          '{}Function'.format(id),
                                          uuid=str(uuid4()),
                                          code=Code.inline(txt),
                                          runtime=Runtime(
                                              'python3.7',
                                              supports_inline_code=True),
                                          handler='index.handler',
                                          environment=env)

        policy = Policy(self, '{}Policy'.format(id))
        self.function.role.attach_inline_policy(policy)
        policy.add_statements(policies)
        rule_target = LambdaFunction(self.function)

        current_time = datetime.now()
        run_time = current_time + timedelta(minutes=3)
        run_schedule = Schedule.cron(year=str(run_time.year),
                                     month=str(run_time.month),
                                     day=str(run_time.day),
                                     hour=str(run_time.hour),
                                     minute=str(run_time.minute))

        self.rule = Rule(self,
                         '{}Rule'.format(id),
                         enabled=True,
                         schedule=run_schedule,
                         targets=[rule_target])
Example #4
    async def create_canary_function(self, id: str) -> Function:
        with open('canary/canary.py', 'r') as code:
            canary_code = code.read()

        function = Function(
            self,
            '{}CanaryFunction'.format(id),
            timeout=Duration.seconds(3),
            code=InlineCode(canary_code),
            handler='index.handler',
            tracing=Tracing.ACTIVE,
            initial_policy=[MINIMAL_FUNCTION_POLICY_STATEMENT],
            runtime=Runtime(
                name='python3.7',
                supports_inline_code=True,
            )
        )

        Rule(self,
             '{}CanaryRule'.format(id),
             enabled=True,
             schedule=Schedule.cron(),
             targets=[LambdaFunction(handler=function)])

        return function
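Note that Schedule.cron() with no options renders as cron(* * * * ? *), i.e. the canary fires every minute. The inlined canary/canary.py is not shown in the example; a hypothetical placeholder with the matching index.handler entry point:

# canary/canary.py - hypothetical stand-in for the inlined canary code.
def handler(event, context):
    # Real canary logic (e.g. probing an endpoint) would go here;
    # an uncaught exception marks the canary run as failed.
    return {'status': 'ok'}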
Example #5
    def create_event_rule(self, func):
        event_pattern = EventPattern(source=['aws.ecs'],
                                     detail_type=['ECS Task State Change'],
                                     detail={
                                         'desiredStatus': ['RUNNING'],
                                         'lastStatus': ['RUNNING'],
                                     })
        rule = EventRule(
            self._stack,
            'public_dns_rule',
            event_pattern=event_pattern,
            enabled=True,
        )

        event_target = LambdaEventTarget(handler=func)
        rule.add_target(event_target)
        self._tag_it(rule)
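EventRule and LambdaEventTarget are not CDK class names, so they are presumably import aliases along these lines:

from aws_cdk.aws_events import EventPattern, Rule as EventRule
from aws_cdk.aws_events_targets import LambdaFunction as LambdaEventTarget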
Example #6
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        table_name = "posts2"
        function_name = "cl2"
        email = "*****@*****.**"

        table = Table(
            self,
            "cl_posts",
            table_name=table_name,
            partition_key=Attribute(name="url", type=AttributeType.STRING),
            time_to_live_attribute="ttl",
        )

        function = PythonFunction(
            self,
            "cl_function",
            function_name=function_name,
            entry="src",
            index="app.py",
            runtime=Runtime.PYTHON_3_8,
            environment={
                "cl_email": email,
                "cl_table_name": table_name
            },
            timeout=Duration.seconds(300),
            initial_policy=[
                PolicyStatement(
                    actions=["ses:SendEmail", "ses:VerifyEmailIdentity"],
                    resources=[
                        f"arn:aws:ses:{self.region}:{self.account}:identity/{email}"
                    ],
                ),
                PolicyStatement(
                    actions=[
                        "dynamodb:BatchGetItem", "dynamodb:BatchWriteItem"
                    ],
                    resources=[table.table_arn],
                ),
            ],
        )

        with open("events/event.json") as f:
            event = json.load(f)

        Rule(
            self,
            "cl_schedule",
            schedule=Schedule.expression("cron(0 19 * * ? *)"),
            targets=[
                LambdaFunction(function,
                               event=RuleTargetInput.from_object(event))
            ],
        )
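Because the target passes RuleTargetInput.from_object(event), the function receives the parsed contents of events/event.json verbatim as its event argument (PythonFunction defaults the handler name to "handler"). A hypothetical sketch of src/app.py under those assumptions:

# src/app.py - hypothetical handler; event is the events/event.json payload.
import os

def handler(event, context):
    email = os.environ['cl_email']
    table_name = os.environ['cl_table_name']
    # ... read posts from DynamoDB and send the digest via SES ...
    print(event, email, table_name)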
Example #7
    def _create_lambdas(self):
        for root, dirs, files in os.walk(LAMBDAS_DIR):
            for f in files:
                if f != "__init__.py":
                    continue

                parent_folder = os.path.basename(os.path.dirname(root))
                lambda_folder = os.path.basename(root)
                name = f"{parent_folder}-{lambda_folder}"
                lambda_config = self.lambdas_config[name]

                layers = []
                for layer_name in lambda_config["layers"]:
                    layers.append(self.layers[layer_name])

                lambda_role = Role(self,
                                   f"{name}_role",
                                   assumed_by=ServicePrincipal(
                                       service="lambda.amazonaws.com"))
                for policy in lambda_config["policies"]:
                    lambda_role.add_to_policy(policy)
                lambda_role.add_managed_policy(
                    ManagedPolicy.from_aws_managed_policy_name(
                        "service-role/AWSLambdaBasicExecutionRole"))

                self.lambdas[name] = Function(
                    self,
                    name,
                    code=Code.from_asset(root),
                    handler="__init__.handle",
                    runtime=Runtime.PYTHON_3_8,
                    layers=layers,
                    function_name=name,
                    environment=lambda_config["variables"],
                    role=lambda_role,
                    timeout=Duration.seconds(lambda_config["timeout"]),
                    memory_size=lambda_config["memory"],
                )

        Rule(self,
             "update_eps",
             schedule=Schedule.cron(hour="2", minute="10"),
             targets=[LambdaFunction(self.lambdas["cron-update_eps"])])
Example #8
    def create_ecs_lambda(self, cluster: ICluster,
                          auto_scaling_group: AutoScalingGroup):
        lambda_func = Function(
            self,
            "LambdaECS",
            code=Code.from_asset("./lambdas/nlb-ecs"),
            handler="index.lambda_handler",
            runtime=Runtime.PYTHON_3_8,
            timeout=Duration.seconds(30),
            environment={
                "AUTO_SCALING_GROUP_NAME":
                auto_scaling_group.auto_scaling_group_name,
            },
        )
        lambda_func.add_to_role_policy(
            PolicyStatement(
                actions=[
                    "autoscaling:DescribeAutoScalingGroups",
                    "ssm:SendCommand",
                    "ssm:GetCommandInvocation",
                ],
                resources=[
                    "*",
                ],
            ))

        Rule(
            self,
            "ECS",
            event_pattern=EventPattern(
                detail_type=["ECS Task State Change"],
                detail={
                    "clusterArn": [cluster.cluster_arn],
                },
                source=["aws.ecs"],
            ),
            targets=[LambdaFunction(lambda_func)],
        )
Example #9
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        
        mytopic = sns.Topic(
            self, "BillingAlert"
        )

        email_parameter = core.CfnParameter(self, "email-param")
        dailyBudget_parameter = core.CfnParameter(self, "DailyBudget")
        monthlyGrowthRate_parameter = core.CfnParameter(self, "MonthlyGrowthRate")
        S3CodePath_parameter = core.CfnParameter(self, "S3CodePath")

        emailAddress = email_parameter.value_as_string
        dailyBudget_value = dailyBudget_parameter.value_as_string
        monthlyGrowthRate_value = monthlyGrowthRate_parameter.value_as_string

        mytopic.add_subscription(subscriptions.EmailSubscription(emailAddress))
        myrole = iam.Role(self, "BillingAlertRole", assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"))
        myrole.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSNSFullAccess"))
        myrole.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"))
        myrole.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("CloudWatchFullAccess"))

        function = awslambda.Function(self, "MyLambda",
            code=awslambda.Code.from_cfn_parameters(object_key_param=S3CodePath_parameter),
            # handler is "<module>.<function>", not a file name; the function is
            # assumed here to be named lambda_handler
            handler="lambda_function.lambda_handler",
            runtime=awslambda.Runtime.PYTHON_3_7,
            role=myrole,
            function_name="BillingAlert",
            memory_size=3000
        )
        function.add_environment("DailyBudget", dailyBudget_value)
        function.add_environment("MonthlyGrowthRate", monthlyGrowthRate_value)
        function.add_environment("SNSARN", getattr(mytopic,"topic_arn"))
        targetFunction = LambdaFunction(function)
        Rule(self, "ScheduleRuleForBillingAlert",
            schedule=Schedule.cron(minute="0", hour="4"),
            targets=[targetFunction]
        )
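A hypothetical sketch of the lambda_function.lambda_handler this stack points at, reading the environment variables the stack sets and publishing to the SNS topic (the actual billing logic is assumed):

# lambda_function.py - hypothetical; matches handler="lambda_function.lambda_handler".
import os
import boto3

sns = boto3.client('sns')

def lambda_handler(event, context):
    daily_budget = float(os.environ['DailyBudget'])
    growth_rate = float(os.environ['MonthlyGrowthRate'])
    # ... fetch the current spend (e.g. from CloudWatch billing metrics),
    # compare it against the budget, and alert if it is exceeded ...
    sns.publish(
        TopicArn=os.environ['SNSARN'],
        Subject='Billing alert',
        Message=f'Daily budget {daily_budget}, monthly growth rate {growth_rate}',
    )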
Example #10
    def __init__(self, scope: core.Construct, construct_id: str,
                 stream: aws_kinesis.IStream, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        sample_device_producer = aws_lambda_python.PythonFunction(
            self,
            'SampleDeviceProducer',
            entry='stacks/sample_kinesis_stream_producer/producer_lambda',
            index='app.py',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            timeout=core.Duration.seconds(30))

        stream.grant_write(sample_device_producer)

        lambda_input = {"Stream": stream.stream_name}
        Rule(self,
             'ProducerTriggerEventRule',
             enabled=True,
             schedule=Schedule.rate(Duration.minutes(1)),
             targets=[
                 aws_events_targets.LambdaFunction(
                     handler=sample_device_producer,
                     event=RuleTargetInput.from_object(lambda_input))
             ])
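The rule injects {"Stream": stream.stream_name} as the Lambda's event, so the producer resolves its target stream at runtime. A minimal hypothetical producer_lambda/app.py (PythonFunction defaults the handler name to "handler"):

# app.py - hypothetical producer; the stream name arrives in the injected event.
import json
import boto3

kinesis = boto3.client('kinesis')

def handler(event, context):
    kinesis.put_record(
        StreamName=event['Stream'],
        Data=json.dumps({'deviceId': 'sample-device', 'reading': 42}),
        PartitionKey='sample-device',
    )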
Example #11
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code from zhxinyua to create VPC, s3_endpoint, bastion, EC2, EBS, Cloudwatch event rule stop EC2, Backup for EC2

        # create a new VPC
        vpc_new = aws_ec2.Vpc(self, "VpcFromCDK", cidr="10.0.0.0/16")
        vpc_new.add_gateway_endpoint(
            "S3Endpoint",
            service=aws_ec2.GatewayVpcEndpointAwsService.S3,
            # add the endpoint route to the PUBLIC subnets' route tables
            subnets=[
                aws_ec2.SubnetSelection(subnet_type=aws_ec2.SubnetType.PUBLIC)
            ])

        # only allow a specific range of IPs to connect to the bastion
        # BastionHostLinux supports two ways to connect: SSM and EC2 Instance Connect
        # (EC2 Instance Connect is not supported in the CN regions)
        host_bastion = aws_ec2.BastionHostLinux(
            self,
            "BastionHost",
            vpc=vpc_new,
            subnet_selection=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC))

        # replace 1.2.3.4/32 with your own IP range to allow access to this bastion
        host_bastion.allow_ssh_access_from(aws_ec2.Peer.ipv4("1.2.3.4/32"))

        # use amazon linux as OS
        amzn_linux = aws_ec2.MachineImage.latest_amazon_linux(
            generation=aws_ec2.AmazonLinuxGeneration.AMAZON_LINUX,
            edition=aws_ec2.AmazonLinuxEdition.STANDARD,
            virtualization=aws_ec2.AmazonLinuxVirt.HVM,
            storage=aws_ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        # secure group
        my_security_group = aws_ec2.SecurityGroup(
            self,
            "SecurityGroup",
            vpc=vpc_new,
            description="SecurityGroup from CDK",
            security_group_name="CDK SecurityGroup",
            allow_all_outbound=True,
        )

        my_security_group.add_ingress_rule(aws_ec2.Peer.ipv4('10.0.0.0/16'),
                                           aws_ec2.Port.tcp(22),
                                           "allow ssh access from the VPC")

        # set up a web instance in a public subnet
        work_server = aws_ec2.Instance(
            self,
            "WebInstance",
            instance_type=aws_ec2.InstanceType("Write an EC2 instance type"),
            machine_image=amzn_linux,
            vpc=vpc_new,
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC),
            security_group=my_security_group,
            key_name="Your SSH key pair name")

        # allow web connect
        work_server.connections.allow_from_any_ipv4(aws_ec2.Port.tcp(80),
                                                    "allow http from world")
        work_server.connections.allow_from_any_ipv4(aws_ec2.Port.tcp(443),
                                                    "allow https from world")

        # set a second ebs to web instance
        work_server.instance.add_property_override(
            "BlockDeviceMappings", [{
                "DeviceName": "/dev/sdb",
                "Ebs": {
                    "VolumeSize": "30",
                    "VolumeType": "gp2",
                    "DeleteOnTermination": "true"
                }
            }])

        # CloudWatch Events rule to stop the instances every day at 15:00 UTC
        # AwsApi targets use the JavaScript SDK's service and action names to call the AWS API
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_events_targets/AwsApi.html
        stop_EC2 = AwsApi(
            service="EC2",
            action="stopInstances",
            parameters={
                "InstanceIds":
                [work_server.instance_id, host_bastion.instance_id]
            })

        Rule(self,
             "ScheduleRule",
             schedule=Schedule.cron(minute="0", hour="15"),
             targets=[stop_EC2])

        # AWS backup part
        # create a BackupVault
        vault = backup.BackupVault(self,
                                   "BackupVault",
                                   backup_vault_name="CDK_Backup_Vault")

        # create a BackupPlan
        plan = backup.BackupPlan(self,
                                 "AWS-Backup-Plan",
                                 backup_plan_name="CDK_Backup")

        # add backup resources, selecting the two instances in two different ways
        plan.add_selection(
            "Selection",
            resources=[
                backup.BackupResource.from_ec2_instance(work_server),
                backup.BackupResource.from_tag("Name", "BastionHost")
            ])

        # details with backup rules
        plan.add_rule(
            backup.BackupPlanRule(
                backup_vault=vault,
                rule_name="CDK_Backup_Rule",
                schedule_expression=Schedule.cron(minute="0",
                                                  hour="16",
                                                  day="1",
                                                  month="1-12"),
                delete_after=Duration.days(130),
                move_to_cold_storage_after=Duration.days(10)))

        # output information after deploy
        output = CfnOutput(self,
                           "BastionHost_information",
                           value=host_bastion.instance_public_ip,
                           description="BastionHost's Public IP")
        output = CfnOutput(self,
                           "WebHost_information",
                           value=work_server.instance_public_ip,
                           description="Web server's Public IP")
Example #12
    def __init__(self, scope: cdk.Construct, construct_id: str, config, vpc: IVpc, instance: IInstance, neo4j_user_secret: ISecret, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        bucket = Bucket(self, "s3-bucket-altimeter",
            bucket_name=config["s3_bucket"],
            # Encryption is disabled (rather than S3_MANAGED) since it is not strictly
            # required here and it conflicts with SCP guardrails set by Control Tower
            # on the Audit account.
            encryption=BucketEncryption.UNENCRYPTED,
            block_public_access=BlockPublicAccess.BLOCK_ALL
        )

        cluster = Cluster(self, "ecs-cluster-altimeter", 
            cluster_name="ecsclstr-altimeter--default",
            vpc=vpc               
        )

        task_role = Role(self, "iam-role-altimeter-task-role",
            assumed_by=ServicePrincipal("ecs-tasks.amazonaws.com"),
            # It appears that within the account where the scanner is running, the task role is (partially) used for scanning resources (rather than the altimeter-scanner-access role).      
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name('SecurityAudit'),
                ManagedPolicy.from_aws_managed_policy_name('job-function/ViewOnlyAccess')
            ]
        )

        task_definition = FargateTaskDefinition(self, "ecs-fgtd-altimeter",
            task_role=task_role,
            memory_limit_mib=self.MEMORY_LIMIT,
            cpu=self.CPU
        )

        docker_path = os.path.join(os.path.curdir,"..")

        image_asset = DockerImageAsset(self, 'ecr-assets-dia-altimeter', 
            directory=docker_path,
            file="scanner.Dockerfile"
        )            

        task_definition.add_container("ecs-container-altimeter",            
            image= ContainerImage.from_docker_image_asset(image_asset),
            # memory_limit_mib=self.MEMORY_LIMIT,
            # cpu=self.CPU,
            environment= {
                "CONFIG_PATH": config["altimeter_config_path"],
                "S3_BUCKET": config["s3_bucket"]
            },
            logging= AwsLogDriver(
                stream_prefix= 'altimeter',
                log_retention= RetentionDays.TWO_WEEKS
            )
        )

        task_definition.add_to_task_role_policy(PolicyStatement(
            resources=["arn:aws:iam::*:role/"+config["account_execution_role"]],
            actions=['sts:AssumeRole']
        ))

        task_definition.add_to_task_role_policy(PolicyStatement(
            resources=[
                "arn:aws:s3:::"+config["s3_bucket"],
                "arn:aws:s3:::"+config["s3_bucket"]+"/*"
            ],
            actions=["s3:GetObject*",
                "s3:GetBucket*",
                "s3:List*",
                "s3:DeleteObject*",
                "s3:PutObject",
                "s3:Abort*",
                "s3:PutObjectTagging"]
        ))

        # Grant the ability to record the stdout to CloudWatch Logs
        # TODO: Refine
        task_definition.add_to_task_role_policy(PolicyStatement(
            resources=["*"],
            actions=['logs:*']
        ))

        # Trigger task every 24 hours
        Rule(self, "events-rule-altimeter-daily-scan",
            rule_name="evrule--altimeter-daily-scan",
            schedule=Schedule.cron(hour="0", minute="0"),
            description="Daily altimeter scan",
            targets=[EcsTask(
                task_definition=task_definition,
                cluster=cluster,
                subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE)
            )]
        )

        # Trigger task manually via event
        Rule(self, "events-rule-altimeter-manual-scan",
            rule_name="evrule--altimeter-manual-scan",
            event_pattern=EventPattern(source=['altimeter']), 
            description="Manual altimeter scan",
            targets=[EcsTask(
                task_definition=task_definition,
                cluster=cluster,
                subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE)
            )]
        )        


        # Don't put the Neo4j importer Lambda in a separate stack: that would create a
        # circular reference with the S3 event source, and using an imported bucket as
        # an event source is not possible (it requires a Bucket, not an IBucket).
        neo4j_importer_function = PythonFunction(self, 'lambda-function-neo4j-importer',
            function_name="function-altimeter--neo4j-importer",             
            entry="../neo4j-importer",
            index="app.py",
            handler="lambda_handler",
            runtime=Runtime.PYTHON_3_8,
            memory_size=256,
            timeout=cdk.Duration.seconds(60),
            vpc=vpc,
            vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Private').subnets),
            environment={
                "neo4j_address": instance.instance_private_ip,
                "neo4j_user_secret_name": neo4j_user_secret.secret_name
            }
        )

        neo4j_importer_function.add_event_source(
            S3EventSource(bucket,
                events= [EventType.OBJECT_CREATED, EventType.OBJECT_REMOVED],
                filters= [ { "prefix": "raw/", "suffix": ".rdf"}]
            )
        )

        # Grant lambda read/write access to the S3 bucket for reading raw rdf, writing prepared rdf and generating signed uri
        bucket.grant_read_write(neo4j_importer_function.role)
        # Grant lambda read access to the neo4j user secret
        neo4j_user_secret.grant_read(neo4j_importer_function.role)
Example #13
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # SQS queue
        state_change_sqs = Queue(
            self,
            "state_change_sqs",
            visibility_timeout=core.Duration.seconds(60)
        )

        # Dynamodb Tables
        # EC2 state changes
        tb_states = Table(
            self, "ec2_states", partition_key=Attribute(name="instance-id",
                type=AttributeType.STRING),
            sort_key=Attribute(
                name="time",
                type=AttributeType.STRING
            ),
            billing_mode=BillingMode.PAY_PER_REQUEST,
            removal_policy=core.RemovalPolicy.DESTROY,
            stream=StreamViewType.NEW_IMAGE)

        # EC2 inventory
        tb_inventory = Table(
            self, "ec2_inventory", partition_key=Attribute(name="instance-id",
                type=AttributeType.STRING),
            sort_key=Attribute(
                name="time",
                type=AttributeType.STRING
            ),
            billing_mode=BillingMode.PAY_PER_REQUEST,
            removal_policy=core.RemovalPolicy.DESTROY,
            stream=StreamViewType.KEYS_ONLY)

        # IAM policies - AWS managed
        basic_exec = ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole")
        sqs_access = ManagedPolicy(self, "LambdaSQSExecution",
            statements=[
                PolicyStatement(
                    effect=Effect.ALLOW,
                    actions=[
                        "sqs:ReceiveMessage",
                        "sqs:DeleteMessage",
                        "sqs:GetQueueAttributes"
                    ],
                    resources=[state_change_sqs.queue_arn]
                )])

        # IAM Policies
        pol_ec2_states_ro = ManagedPolicy(self, "pol_EC2StatesReadOnly",
            statements=[
                PolicyStatement(
                    effect=Effect.ALLOW,
                    actions=[
                        "dynamodb:DescribeStream",
                        "dynamodb:GetRecords",
                        "dynamodb:GetItem",
                        "dynamodb:GetShardIterator",
                        "dynamodb:ListStreams"
                    ],
                    resources=[tb_states.table_arn]
                )])

        pol_ec2_states_rwd = ManagedPolicy(
            self, "pol_EC2StatesWriteDelete",
            statements=[
                PolicyStatement(
                    effect=Effect.ALLOW,
                    actions=[
                        "dynamodb:DeleteItem",
                        "dynamodb:DescribeTable",
                        "dynamodb:PutItem",
                        "dynamodb:Query",
                        "dynamodb:UpdateItem"
                    ],
                    resources=[tb_states.table_arn]
                )])

        pol_ec2_inventory_full = ManagedPolicy(
            self, "pol_EC2InventoryFullAccess",
            statements=[
                PolicyStatement(
                    effect=Effect.ALLOW,
                    actions=[
                        "dynamodb:DeleteItem",
                        "dynamodb:DescribeTable",
                        "dynamodb:GetItem",
                        "dynamodb:PutItem",
                        "dynamodb:Query",
                        "dynamodb:UpdateItem"
                    ],
                    resources=[tb_inventory.table_arn]
                )])
        
        pol_lambda_describe_ec2 = ManagedPolicy(
            self, "pol_LambdaDescribeEC2",
            statements=[
                PolicyStatement(
                    effect=Effect.ALLOW,
                    actions=[
                        "ec2:Describe*"
                    ],
                    resources=["*"]
                )])

        # IAM Roles
        rl_event_capture = Role(
            self,
            'rl_state_capture',
            assumed_by=ServicePrincipal('lambda.amazonaws.com'),
            managed_policies=[basic_exec, sqs_access, pol_ec2_states_rwd]
            )

        rl_event_processor = Role(
            self,
            'rl_state_processor',
            assumed_by=ServicePrincipal('lambda.amazonaws.com'),
            managed_policies=[
                basic_exec,
                pol_ec2_states_ro,
                pol_ec2_states_rwd,
                pol_ec2_inventory_full,
                pol_lambda_describe_ec2])

        # event capture lambda
        lambda_event_capture = Function(
            self, "lambda_event_capture",
            handler="event_capture.handler",
            runtime=Runtime.PYTHON_3_7,
            code=Code.asset('event_capture'),
            role=rl_event_capture,
            events=[SqsEventSource(state_change_sqs)],
            environment={"state_table": tb_states.table_name}
        )

        # event processor lambda
        lambda_event_processor = Function(
            self, "lambda_event_processor",
            handler="event_processor.handler",
            runtime=Runtime.PYTHON_3_7,
            code=Code.asset('event_processor'),
            role=rl_event_processor,
            events=[
                DynamoEventSource(
                    tb_states,
                    starting_position=StartingPosition.LATEST)
            ],
            environment={
                "inventory_table": tb_inventory.table_name,
                }
        )

        # Cloudwatch Event
        event_ec2_change = Rule(
            self, "ec2_state_change",
            description="trigger on ec2 start, stop and terminate instances",
            event_pattern=EventPattern(
                source=["aws.ec2"],
                detail_type=["EC2 Instance State-change Notification"],
                detail={
                    "state": [
                        "running",
                        "stopped",
                        "terminated"]
                    }
                ),
            targets=[aws_events_targets.SqsQueue(state_change_sqs)]
        )

        # Outputs
        core.CfnOutput(self, "rl_state_capture_arn", value=rl_event_capture.role_arn)
        core.CfnOutput(self, "rl_state_processor_arn", value=rl_event_processor.role_arn)
        core.CfnOutput(self, "tb_states_arn", value=tb_states.table_arn)
        core.CfnOutput(self, "tb_inventory_arn", value=tb_inventory.table_arn)
        core.CfnOutput(self, "sqs_state_change", value=state_change_sqs.queue_arn)
Example #14
    def create_event_handling(
        self,
        secrets: List[secretsmanager.Secret],
        slack_host_ssm_name: str,
        slack_webhook_ssm_name: str,
    ) -> lambda_.Function:
        """

        Args:
            secrets: a list of secrets that we will track for events
            slack_host_ssm_name: the SSM parameter name for the slack host
            slack_webhook_ssm_name: the SSM parameter name for the slack webhook id

        Returns:
            a lambda event handler
        """
        dirname = os.path.dirname(__file__)
        filename = os.path.join(dirname, "runtime/notify_slack")

        env = {
            # for the moment we don't parametrise at the CDK level.. only needed if this is liable to change
            "SLACK_HOST_SSM_NAME": slack_host_ssm_name,
            "SLACK_WEBHOOK_SSM_NAME": slack_webhook_ssm_name,
        }

        notifier = lambda_.Function(
            self,
            "NotifySlack",
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.AssetCode(filename),
            handler="lambda_entrypoint.main",
            timeout=Duration.minutes(1),
            environment=env,
        )

        get_ssm_policy = PolicyStatement()

        # there is some weirdness around SSM parameter ARN formation and leading
        # slashes - the parameters we want to use do have a leading slash, but put
        # in this guard just in case
        if not slack_webhook_ssm_name.startswith(
                "/") or not slack_host_ssm_name.startswith("/"):
            raise Exception(
                "SSM parameters need to start with a leading slash")

        # note: the slash required between "parameter" and the name is supplied by
        # the leading slash of the name itself - a quirk of the ARN format
        get_ssm_policy.add_resources(
            f"arn:aws:ssm:*:*:parameter{slack_host_ssm_name}")
        get_ssm_policy.add_resources(
            f"arn:aws:ssm:*:*:parameter{slack_webhook_ssm_name}")
        get_ssm_policy.add_actions("ssm:GetParameter")

        notifier.add_to_role_policy(get_ssm_policy)

        # we want a rule that traps all the rotation failures for our JWT secrets
        rule = Rule(
            self,
            "NotifySlackRule",
        )

        rule.add_event_pattern(
            source=["aws.secretsmanager"],
            detail={
                # at the moment only interested in these - add extra events into this array if wanting more
                "eventName": ["RotationFailed", "RotationSucceeded"],
                "additionalEventData": {
                    "SecretId": list(map(lambda s: s.secret_arn, secrets))
                },
            },
        )

        rule.add_target(LambdaFunction(notifier))

        return notifier
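A hypothetical sketch of runtime/notify_slack/lambda_entrypoint.py, resolving the two SSM parameters named in the environment and posting to Slack (the URL scheme and payload format are assumptions):

# lambda_entrypoint.py - hypothetical; matches handler="lambda_entrypoint.main".
import json
import os
import urllib.request

import boto3

ssm = boto3.client('ssm')

def main(event, context):
    host = ssm.get_parameter(Name=os.environ['SLACK_HOST_SSM_NAME'])['Parameter']['Value']
    webhook = ssm.get_parameter(Name=os.environ['SLACK_WEBHOOK_SSM_NAME'])['Parameter']['Value']
    detail = event.get('detail', {})
    body = json.dumps({'text': f"Secrets Manager: {detail.get('eventName')}"})
    req = urllib.request.Request(
        f'https://{host}/{webhook}',
        data=body.encode(),
        headers={'Content-Type': 'application/json'},
    )
    urllib.request.urlopen(req)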
Example #15
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        stack_util = StackUtil()
        repo = Repository.from_repository_attributes(
            scope=self,
            id='FunctionRepository',
            repository_name=stack_util.get_name('repo'),
            repository_arn=f'arn:aws:ecr:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:repository/{stack_util.get_name("repo")}'
        )

        fail_task = Fail(self, 'FailTask', comment='failed.')
        succeed_task = Succeed(self, 'SucceedTask', comment='succeeded.')

        get_forecast_function = get_get_forecast_resource(self, repo)
        send_message_function = get_send_message_resource(self, repo)

        get_forecast_task = LambdaInvoke(self,
                                         'GetForecastTask',
                                         lambda_function=get_forecast_function,
                                         input_path='$',
                                         result_path='$.get_forecast_task',
                                         output_path='$',
                                         payload_response_only=True)
        get_forecast_task.add_catch(fail_task,
                                    errors=[Errors.ALL],
                                    result_path='$.error_info')

        send_message_task = LambdaInvoke(self,
                                         'SendMessageTask',
                                         lambda_function=send_message_function,
                                         input_path='$.get_forecast_task.body',
                                         result_path='$.send_message_function',
                                         output_path='$',
                                         payload_response_only=True)
        send_message_task.add_catch(fail_task,
                                    errors=[Errors.ALL],
                                    result_path='$.error_info')

        state_machine = StateMachine(
            self,
            id='StateMachine',
            state_machine_name=stack_util.get_upper_name('STATE-MACHINE'),
            definition=get_forecast_task.next(send_message_task).next(
                succeed_task))

        today_rule = Rule(
            self,
            'StateMachineTodayRule',
            description='invoking state machine for today',
            rule_name=stack_util.get_upper_name('INVOKE-STATE-MACHINE-TODAY'),
            schedule=Schedule.cron(
                hour='23',
                minute='0',
            ))

        tomorrow_rule = Rule(self,
                             'StateMachineTomorrowRule',
                             description='invoking state machine for tomorrow',
                             rule_name=stack_util.get_upper_name(
                                 'INVOKE-STATE-MACHINE-TOMORROW'),
                             schedule=Schedule.cron(
                                 hour='9',
                                 minute='30',
                             ))

        today_target = SfnStateMachine(
            state_machine,
            input=RuleTargetInput.from_object({
                'city': '130010',
                'date_label': '今日'  # "today"
            }))
        today_rule.add_target(today_target)

        tomorrow_target = SfnStateMachine(
            state_machine,
            input=RuleTargetInput.from_object({
                'city': '130010',
                'date_label': '明日'  # "tomorrow"
            }))
        tomorrow_rule.add_target(tomorrow_target)