def _create_lambdas(self):
    clean_pycache()

    for root, dirs, files in os.walk(LAMBDAS_DIR):
        for f in files:
            if f != "__init__.py":
                continue

            parent_folder = os.path.basename(os.path.dirname(root))
            lambda_folder = os.path.basename(root)
            name = f"{parent_folder}-{lambda_folder}"
            lambda_config = self.lambdas_config[name]

            layers = []
            for layer_name in lambda_config["layers"]:
                layers.append(self.layers[layer_name])

            lambda_role = Role(
                self,
                f"{name}_role",
                assumed_by=ServicePrincipal(service="lambda.amazonaws.com")
            )
            for policy in lambda_config["policies"]:
                lambda_role.add_to_policy(policy)
            lambda_role.add_managed_policy(
                ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"))

            lambda_args = {
                "code": Code.from_asset(root),
                "handler": "__init__.handle",
                "runtime": Runtime.PYTHON_3_8,
                "layers": layers,
                "function_name": name,
                "environment": lambda_config["variables"],
                "role": lambda_role,
                "timeout": Duration.seconds(lambda_config["timeout"]),
                "memory_size": lambda_config["memory"],
            }
            if "concurrent_executions" in lambda_config:
                lambda_args["reserved_concurrent_executions"] = lambda_config["concurrent_executions"]

            self.lambdas[name] = Function(self, name, **lambda_args)

    self.lambdas["sqs_handlers-post_anime"].add_event_source(SqsEventSource(self.post_anime_queue))

    Rule(
        self,
        "titles_updater",
        schedule=Schedule.cron(hour="2", minute="10"),
        targets=[LambdaFunction(self.lambdas["crons-titles_updater"])]
    )
    Rule(
        self,
        "episodes_updater",
        schedule=Schedule.cron(hour="4", minute="10"),
        targets=[LambdaFunction(self.lambdas["crons-episodes_updater"])]
    )
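# A minimal sketch of the shape of self.lambdas_config that _create_lambdas above assumes,
# derived from the keys it reads; the entry name and values are illustrative placeholders,
# not taken from the real project config.
lambdas_config_example = {
    "crons-titles_updater": {
        "layers": ["utils"],                  # layer names resolved through self.layers
        "variables": {"LOG_LEVEL": "INFO"},   # becomes the Lambda environment
        "policies": [],                       # list of iam.PolicyStatement objects
        "timeout": 60,                        # seconds
        "memory": 128,                        # MB
        # "concurrent_executions": 1,         # optional: reserved concurrency
    },
}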
def __init__(self, app: App, id: str, txt: str, env: dict, policies: PolicyStatement,
             domain: str, hosted_zone_id: str) -> None:
    super().__init__(app, id)

    env['HOSTED_ZONE_ID'] = hosted_zone_id
    self.function = SingletonFunction(
        self,
        '{}Function'.format(id),
        uuid=str(uuid4()),
        code=Code.inline(txt),
        runtime=Runtime('python3.7', supports_inline_code=True),
        handler='index.handler',
        environment=env)

    policy = Policy(self, '{}Policy'.format(id))
    self.function.role.attach_inline_policy(policy)
    policy.add_statements(policies)

    rule_target = LambdaFunction(self.function)

    # Schedule a one-shot run three minutes from now.
    # EventBridge cron schedules are evaluated in UTC, so build the schedule from UTC time.
    current_time = datetime.utcnow()
    run_time = current_time + timedelta(minutes=3)
    run_schedule = Schedule.cron(year=str(run_time.year),
                                 month=str(run_time.month),
                                 day=str(run_time.day),
                                 hour=str(run_time.hour),
                                 minute=str(run_time.minute))

    self.rule = Rule(self,
                     '{}Rule'.format(id),
                     enabled=True,
                     schedule=run_schedule,
                     targets=[rule_target])
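# Hypothetical usage of the one-shot construct above. The class name OneShotFunction, the
# inline handler source and the Route 53 action are assumptions for illustration only; the
# construct itself only requires the parameters shown in its __init__ signature.
app = App()
OneShotFunction(
    app, 'DnsUpdater',
    txt="def handler(event, context):\n    return 'ok'",
    env={},
    policies=PolicyStatement(actions=['route53:ChangeResourceRecordSets'], resources=['*']),
    domain='example.com',
    hosted_zone_id='Z0000000000000EXAMPLE')
app.synth()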
async def create_canary_function(self, id: str) -> Function:
    with open('canary/canary.py', 'r') as code:
        canary_code = code.read()

    function = Function(
        self,
        '{}CanaryFunction'.format(id),
        timeout=Duration.seconds(3),
        code=InlineCode(canary_code),
        handler='index.handler',
        tracing=Tracing.ACTIVE,
        initial_policy=[MINIMAL_FUNCTION_POLICY_STATEMENT],
        runtime=Runtime(
            'python3.7',
            supports_inline_code=True,
        )
    )

    # Schedule.cron() with no fields defaults every field to '*', i.e. the canary runs every minute.
    Rule(self,
         '{}CanaryRule'.format(id),
         enabled=True,
         schedule=Schedule.cron(),
         targets=[LambdaFunction(function)])

    return function
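# A minimal sketch of what canary/canary.py could contain. create_canary_function above only
# requires a module whose top-level handler matches 'index.handler'; the body below is an
# assumption, not the real canary code.
def handler(event, context):
    # A real canary would probe the endpoint under test here and raise on failure.
    return {"status": "ok"}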
def _create_lambdas(self):
    for root, dirs, files in os.walk(LAMBDAS_DIR):
        for f in files:
            if f != "__init__.py":
                continue

            parent_folder = os.path.basename(os.path.dirname(root))
            lambda_folder = os.path.basename(root)
            name = f"{parent_folder}-{lambda_folder}"
            lambda_config = self.lambdas_config[name]

            layers = []
            for layer_name in lambda_config["layers"]:
                layers.append(self.layers[layer_name])

            lambda_role = Role(
                self,
                f"{name}_role",
                assumed_by=ServicePrincipal(service="lambda.amazonaws.com"))
            for policy in lambda_config["policies"]:
                lambda_role.add_to_policy(policy)
            lambda_role.add_managed_policy(
                ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole"))

            self.lambdas[name] = Function(
                self,
                name,
                code=Code.from_asset(root),
                handler="__init__.handle",
                runtime=Runtime.PYTHON_3_8,
                layers=layers,
                function_name=name,
                environment=lambda_config["variables"],
                role=lambda_role,
                timeout=Duration.seconds(lambda_config["timeout"]),
                memory_size=lambda_config["memory"],
            )

    Rule(self,
         "update_eps",
         schedule=Schedule.cron(hour="2", minute="10"),
         targets=[LambdaFunction(self.lambdas["cron-update_eps"])])
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    mytopic = sns.Topic(self, "BillingAlert")

    email_parameter = core.CfnParameter(self, "email-param")
    dailyBudget_parameter = core.CfnParameter(self, "DailyBudget")
    monthlyGrowthRate_parameter = core.CfnParameter(self, "MonthlyGrowthRate")
    S3CodePath_parameter = core.CfnParameter(self, "S3CodePath")

    emailAddress = email_parameter.value_as_string
    dailyBudget_value = dailyBudget_parameter.value_as_string
    monthlyGrowthRate_value = monthlyGrowthRate_parameter.value_as_string

    mytopic.add_subscription(subscriptions.EmailSubscription(emailAddress))

    myrole = iam.Role(self, "BillingAlertRole",
                      assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"))
    myrole.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSNSFullAccess"))
    myrole.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"))
    myrole.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name("CloudWatchFullAccess"))

    function = awslambda.Function(
        self, "MyLambda",
        code=awslambda.Code.from_cfn_parameters(object_key_param=S3CodePath_parameter),
        # The handler must be "<module>.<function>", not a file name; "lambda_handler" is assumed here.
        handler="lambda_function.lambda_handler",
        runtime=awslambda.Runtime.PYTHON_3_7,
        role=myrole,
        function_name="BillingAlert",
        memory_size=3000
    )
    function.add_environment("DailyBudget", dailyBudget_value)
    function.add_environment("MonthlyGrowthRate", monthlyGrowthRate_value)
    function.add_environment("SNSARN", mytopic.topic_arn)

    targetFunction = LambdaFunction(function)
    Rule(self, "ScheduleRuleForBillingAlert",
         schedule=Schedule.cron(minute="0", hour="4"),
         targets=[targetFunction])
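# A sketch of how the BillingAlert Lambda handler could consume the environment variables set
# above (DailyBudget, MonthlyGrowthRate, SNSARN); the handler body and the alert logic are
# assumptions, not the deployed code.
import os
import boto3

def lambda_handler(event, context):
    daily_budget = float(os.environ["DailyBudget"])
    growth_rate = float(os.environ["MonthlyGrowthRate"])
    sns_arn = os.environ["SNSARN"]
    # ... fetch the current spend and compare it against daily_budget / growth_rate here ...
    boto3.client("sns").publish(TopicArn=sns_arn, Message="Billing alert triggered")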
def __init__(self, scope: Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Code from zhxinyua to create a VPC, S3 endpoint, bastion, EC2 instance, EBS volume,
    # a CloudWatch event rule to stop the EC2 instances, and AWS Backup for EC2.

    # Create a new VPC.
    vpc_new = aws_ec2.Vpc(self, "VpcFromCDK", cidr="10.0.0.0/16")
    vpc_new.add_gateway_endpoint(
        "S3Endpoint",
        service=aws_ec2.GatewayVpcEndpointAwsService.S3,
        # Add the endpoint only to the PUBLIC subnets.
        subnets=[
            aws_ec2.SubnetSelection(subnet_type=aws_ec2.SubnetType.PUBLIC)
        ])

    # Only allow a specific range of IPs to connect to the bastion.
    # BastionHostLinux supports two ways to connect: SSM and EC2 Instance Connect.
    # EC2 Instance Connect is not supported in the China regions.
    host_bastion = aws_ec2.BastionHostLinux(
        self,
        "BastionHost",
        vpc=vpc_new,
        subnet_selection=aws_ec2.SubnetSelection(subnet_type=aws_ec2.SubnetType.PUBLIC))

    # Replace 1.2.3.4/32 with your own IP range to allow access to this bastion.
    host_bastion.allow_ssh_access_from(aws_ec2.Peer.ipv4("1.2.3.4/32"))

    # Use Amazon Linux as the OS.
    amzn_linux = aws_ec2.MachineImage.latest_amazon_linux(
        generation=aws_ec2.AmazonLinuxGeneration.AMAZON_LINUX,
        edition=aws_ec2.AmazonLinuxEdition.STANDARD,
        virtualization=aws_ec2.AmazonLinuxVirt.HVM,
        storage=aws_ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

    # Security group
    my_security_group = aws_ec2.SecurityGroup(
        self,
        "SecurityGroup",
        vpc=vpc_new,
        description="SecurityGroup from CDK",
        security_group_name="CDK SecurityGroup",
        allow_all_outbound=True,
    )
    my_security_group.add_ingress_rule(aws_ec2.Peer.ipv4('10.0.0.0/16'),
                                       aws_ec2.Port.tcp(22),
                                       "allow ssh access from the VPC")

    # Set up a web instance in a public subnet.
    work_server = aws_ec2.Instance(
        self,
        "WebInstance",
        instance_type=aws_ec2.InstanceType("Write an EC2 instance type here"),
        machine_image=amzn_linux,
        vpc=vpc_new,
        vpc_subnets=aws_ec2.SubnetSelection(subnet_type=aws_ec2.SubnetType.PUBLIC),
        security_group=my_security_group,
        key_name="Your SSH key pair name")

    # Allow web connections.
    work_server.connections.allow_from_any_ipv4(aws_ec2.Port.tcp(80), "allow http from world")
    work_server.connections.allow_from_any_ipv4(aws_ec2.Port.tcp(443), "allow https from world")

    # Attach a second EBS volume to the web instance.
    work_server.instance.add_property_override(
        "BlockDeviceMappings", [{
            "DeviceName": "/dev/sdb",
            "Ebs": {
                "VolumeSize": "30",
                "VolumeType": "gp2",
                "DeleteOnTermination": "true"
            }
        }])

    # CloudWatch event rule to stop the instances every day at 15:00 UTC.
    # The AwsApi target uses the JavaScript SDK to call the AWS API:
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_events_targets/AwsApi.html
    stop_EC2 = AwsApi(
        service="EC2",
        action="stopInstances",
        parameters={
            "InstanceIds": [work_server.instance_id, host_bastion.instance_id]
        })

    Rule(self,
         "ScheduleRule",
         schedule=Schedule.cron(minute="0", hour="15"),
         targets=[stop_EC2])

    # AWS Backup part
    # Create a BackupVault.
    vault = backup.BackupVault(self, "BackupVault", backup_vault_name="CDK_Backup_Vault")

    # Create a BackupPlan.
    plan = backup.BackupPlan(self, "AWS-Backup-Plan", backup_plan_name="CDK_Backup")

    # Add backup resources in two different ways for the two resources.
    plan.add_selection(
        "Selection",
        resources=[
            backup.BackupResource.from_ec2_instance(work_server),
            backup.BackupResource.from_tag("Name", "BastionHost")
        ])

    # Details of the backup rule.
    plan.add_rule(
        backup.BackupPlanRule(
            backup_vault=vault,
            rule_name="CDK_Backup_Rule",
            schedule_expression=Schedule.cron(minute="0",
                                              hour="16",
                                              day="1",
                                              month="1-12"),
            delete_after=Duration.days(130),
            move_to_cold_storage_after=Duration.days(10)))

    # Output information after deploy.
    CfnOutput(self,
              "BastionHost_information",
              value=host_bastion.instance_public_ip,
              description="BastionHost's Public IP")
    CfnOutput(self,
              "WebHost_information",
              value=work_server.instance_public_ip,
              description="Web server's Public IP")
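    # Optional hardening sketch: aws_events_targets.AwsApi accepts a policy_statement so the
    # rule's role is explicitly allowed to stop the instances. This variant is an assumption
    # (it also assumes `from aws_cdk import aws_iam`), not part of the original stack.
    stop_EC2_with_policy = AwsApi(
        service="EC2",
        action="stopInstances",
        parameters={
            "InstanceIds": [work_server.instance_id, host_bastion.instance_id]
        },
        policy_statement=aws_iam.PolicyStatement(actions=["ec2:StopInstances"],
                                                 resources=["*"]))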
def __init__(self, scope: cdk.Construct, construct_id: str, config, vpc: IVpc,
             instance: IInstance, neo4j_user_secret: ISecret, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    bucket = Bucket(self,
                    "s3-bucket-altimeter",
                    bucket_name=config["s3_bucket"],
                    # Disable encryption (instead of S3_MANAGED) since it's not really required
                    # and it conflicts with SCP guardrails set by Control Tower on the Audit account.
                    encryption=BucketEncryption.UNENCRYPTED,
                    block_public_access=BlockPublicAccess.BLOCK_ALL)

    cluster = Cluster(self,
                      "ecs-cluster-altimeter",
                      cluster_name="ecsclstr-altimeter--default",
                      vpc=vpc)

    # Within the account where the scanner runs, the task role is (partially) used for
    # scanning resources (rather than the altimeter-scanner-access role).
    task_role = Role(self,
                     "iam-role-altimeter-task-role",
                     assumed_by=ServicePrincipal("ecs-tasks.amazonaws.com"),
                     managed_policies=[
                         ManagedPolicy.from_aws_managed_policy_name('SecurityAudit'),
                         ManagedPolicy.from_aws_managed_policy_name('job-function/ViewOnlyAccess')
                     ])

    task_definition = FargateTaskDefinition(self,
                                            "ecs-fgtd-altimeter",
                                            task_role=task_role,
                                            memory_limit_mib=self.MEMORY_LIMIT,
                                            cpu=self.CPU)

    docker_path = os.path.join(os.path.curdir, "..")
    image_asset = DockerImageAsset(self,
                                   'ecr-assets-dia-altimeter',
                                   directory=docker_path,
                                   file="scanner.Dockerfile")

    task_definition.add_container("ecs-container-altimeter",
                                  image=ContainerImage.from_docker_image_asset(image_asset),
                                  # memory_limit_mib=self.MEMORY_LIMIT,
                                  # cpu=self.CPU,
                                  environment={
                                      "CONFIG_PATH": config["altimeter_config_path"],
                                      "S3_BUCKET": config["s3_bucket"]
                                  },
                                  logging=AwsLogDriver(stream_prefix='altimeter',
                                                       log_retention=RetentionDays.TWO_WEEKS))

    task_definition.add_to_task_role_policy(PolicyStatement(
        resources=["arn:aws:iam::*:role/" + config["account_execution_role"]],
        actions=['sts:AssumeRole']))

    task_definition.add_to_task_role_policy(PolicyStatement(
        resources=[
            "arn:aws:s3:::" + config["s3_bucket"],
            "arn:aws:s3:::" + config["s3_bucket"] + "/*"
        ],
        actions=["s3:GetObject*",
                 "s3:GetBucket*",
                 "s3:List*",
                 "s3:DeleteObject*",
                 "s3:PutObject",
                 "s3:Abort*",
                 "s3:PutObjectTagging"]))

    # Grant the ability to record stdout to CloudWatch Logs.
    # TODO: Refine
    task_definition.add_to_task_role_policy(PolicyStatement(
        resources=["*"],
        actions=['logs:*']))

    # Trigger the task every 24 hours.
    Rule(self,
         "events-rule-altimeter-daily-scan",
         rule_name="evrule--altimeter-daily-scan",
         schedule=Schedule.cron(hour="0", minute="0"),
         description="Daily altimeter scan",
         targets=[EcsTask(task_definition=task_definition,
                          cluster=cluster,
                          subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE))])

    # Trigger the task manually via a custom event.
    Rule(self,
         "events-rule-altimeter-manual-scan",
         rule_name="evrule--altimeter-manual-scan",
         event_pattern=EventPattern(source=['altimeter']),
         description="Manual altimeter scan",
         targets=[EcsTask(task_definition=task_definition,
                          cluster=cluster,
                          subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE))])

    # Don't put the Neo4j importer Lambda in a separate stack: that causes a circular reference
    # with the S3 event source, and using an imported bucket as an event source is not possible
    # (you need a Bucket, not an IBucket).
    neo4j_importer_function = PythonFunction(
        self,
        'lambda-function-neo4j-importer',
        function_name="function-altimeter--neo4j-importer",
        entry="../neo4j-importer",
        index="app.py",
        handler="lambda_handler",
        runtime=Runtime.PYTHON_3_8,
        memory_size=256,
        timeout=cdk.Duration.seconds(60),
        vpc=vpc,
        vpc_subnets=SubnetSelection(
            subnets=vpc.select_subnets(subnet_group_name='Private').subnets),
        environment={
            "neo4j_address": instance.instance_private_ip,
            "neo4j_user_secret_name": neo4j_user_secret.secret_name
        })

    neo4j_importer_function.add_event_source(
        S3EventSource(bucket,
                      events=[EventType.OBJECT_CREATED, EventType.OBJECT_REMOVED],
                      filters=[{"prefix": "raw/", "suffix": ".rdf"}]))

    # Grant the Lambda read/write access to the S3 bucket for reading raw RDF,
    # writing prepared RDF and generating a signed URI.
    bucket.grant_read_write(neo4j_importer_function.role)

    # Grant the Lambda read access to the Neo4j user secret.
    neo4j_user_secret.grant_read(neo4j_importer_function.role)
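# A minimal sketch of the config dict the altimeter stack above expects, based on the keys it
# reads; the bucket name and paths are placeholders, only "account_execution_role" mirrors the
# role mentioned in the stack's comments.
altimeter_config_example = {
    "s3_bucket": "my-altimeter-scan-bucket",
    "altimeter_config_path": "/app/config/altimeter_config.toml",
    "account_execution_role": "altimeter-scanner-access",
}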
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    stack_util = StackUtil()
    repo = Repository.from_repository_attributes(
        scope=self,
        id='FunctionRepository',
        repository_name=stack_util.get_name('repo'),
        repository_arn=f'arn:aws:ecr:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:repository/{stack_util.get_name("repo")}')

    fail_task = Fail(self, 'FailTask', comment='failed.')
    succeed_task = Succeed(self, 'SucceedTask', comment='succeeded.')

    get_forecast_function = get_get_forecast_resource(self, repo)
    send_message_function = get_send_message_resource(self, repo)

    get_forecast_task = LambdaInvoke(self,
                                     'GetForecastTask',
                                     lambda_function=get_forecast_function,
                                     input_path='$',
                                     result_path='$.get_forecast_task',
                                     output_path='$',
                                     payload_response_only=True)
    get_forecast_task.add_catch(fail_task,
                                errors=[Errors.ALL],
                                result_path='$.error_info')

    send_message_task = LambdaInvoke(self,
                                     'SendMessageTask',
                                     lambda_function=send_message_function,
                                     input_path='$.get_forecast_task.body',
                                     result_path='$.send_message_function',
                                     output_path='$',
                                     payload_response_only=True)
    send_message_task.add_catch(fail_task,
                                errors=[Errors.ALL],
                                result_path='$.error_info')

    state_machine = StateMachine(
        self,
        id='StateMachine',
        state_machine_name=stack_util.get_upper_name('STATE-MACHINE'),
        definition=get_forecast_task.next(send_message_task).next(succeed_task))

    today_rule = Rule(
        self,
        'StateMachineTodayRule',
        description='invoking state machine for today',
        rule_name=stack_util.get_upper_name('INVOKE-STATE-MACHINE-TODAY'),
        schedule=Schedule.cron(hour='23', minute='0'))

    tomorrow_rule = Rule(
        self,
        'StateMachineTomorrowRule',
        description='invoking state machine for tomorrow',
        rule_name=stack_util.get_upper_name('INVOKE-STATE-MACHINE-TOMORROW'),
        schedule=Schedule.cron(hour='9', minute='30'))

    today_target = SfnStateMachine(state_machine,
                                   input=RuleTargetInput.from_object({
                                       'city': '130010',
                                       'date_label': '今日'  # "today"
                                   }))
    today_rule.add_target(today_target)

    tomorrow_target = SfnStateMachine(state_machine,
                                      input=RuleTargetInput.from_object({
                                          'city': '130010',
                                          'date_label': '明日'  # "tomorrow"
                                      }))
    tomorrow_rule.add_target(tomorrow_target)
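# A sketch of how the payload flows through the state machine above, based on the
# input_path / result_path / output_path settings; field contents are illustrative.
#
# EventBridge input:        {"city": "130010", "date_label": "今日"}
# after GetForecastTask:    {"city": "130010", "date_label": "今日",
#                            "get_forecast_task": {..., "body": {...forecast...}}}
# SendMessageTask input:    "$.get_forecast_task.body"  # only the forecast body is passed on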