def _add_code_build_notification_rule(self):
    code_build_notification_rule = events.CfnRule(
        self.stack_scope,
        "CodeBuildNotificationRule",
        event_pattern={
            "detail": {
                "build-status": ["FAILED", "STOPPED", "SUCCEEDED"],
                "project-name": [self._code_build_image_builder_project.ref],
            },
            "detail-type": ["CodeBuild Build State Change"],
            "source": ["aws.codebuild"],
        },
        state="ENABLED",
        targets=[
            events.CfnRule.TargetProperty(
                arn=self._code_build_notification_lambda.attr_arn,
                id="BuildNotificationFunction",
            )
        ],
    )
    awslambda.CfnPermission(
        self.stack_scope,
        "BuildNotificationFunctionInvokePermission",
        action="lambda:InvokeFunction",
        function_name=self._code_build_notification_lambda.attr_arn,
        principal="events.amazonaws.com",
        source_arn=code_build_notification_rule.attr_arn,
    )
    return code_build_notification_rule
def __create_s3_trigger_lambda_invoke_permission(
        self, bucket_name: str, s3_trigger_lambda_function: aws_lambda.Function
) -> aws_lambda.CfnPermission:
    # Allow S3 (from this account, for this bucket only) to invoke the
    # trigger function. The return annotation matches the L1 construct
    # actually returned.
    return aws_lambda.CfnPermission(
        self,
        'S3TriggerLambdaInvokePermission',
        function_name=s3_trigger_lambda_function.function_name,
        action='lambda:InvokeFunction',
        principal='s3.amazonaws.com',
        source_account=Fn.ref('AWS::AccountId'),
        source_arn=f'arn:aws:s3:::{bucket_name}')
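# --- Illustrative companion wiring (not from the original source) ---
# The CfnPermission above only authorizes S3 to call the function; the bucket
# must separately declare a notification that targets it. A minimal sketch,
# assuming `aws_s3` is imported and the bucket is created in the same stack
# (the bucket name below is a placeholder):
def __create_s3_trigger_bucket(
        self, s3_trigger_lambda_function: aws_lambda.Function
) -> aws_s3.CfnBucket:
    return aws_s3.CfnBucket(
        self,
        'S3TriggerBucket',
        bucket_name='my-trigger-bucket',  # hypothetical name
        notification_configuration=aws_s3.CfnBucket.NotificationConfigurationProperty(
            lambda_configurations=[
                aws_s3.CfnBucket.LambdaConfigurationProperty(
                    event='s3:ObjectCreated:*',
                    function=s3_trigger_lambda_function.function_arn)
            ]))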
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    # Import Lambda ARN, REST API ID, and Method ARN from stack exports
    LAMBDA_ARN = Fn.import_value("MyLambdaFunction")
    API_ID = Fn.import_value("MyAPIGWID")
    METHOD_ARN = Fn.import_value("MyMethodArn")
    STAGE_NAME = Fn.import_value("MyStageName")

    # Import Lambda function from ARN
    lambda_fn = lambda_.Function.from_function_arn(self, 'lambda_fn', LAMBDA_ARN)

    # Create new function version
    version = lambda_.CfnVersion(self, "lambdaVersion",
                                 function_name=lambda_fn.function_name)

    # Retain versions when deleting the stack, for canary promotion and
    # additional deployments
    version.apply_removal_policy(policy=RemovalPolicy.RETAIN)

    # Create Dev alias
    alias = lambda_.CfnAlias(self, "lambdaAlias",
                             function_name=lambda_fn.function_name,
                             function_version=version.attr_version,
                             name="Dev")

    # Add permissions for the Dev alias. Caveat: METHOD_ARN and STAGE_NAME
    # are deploy-time tokens, so str.replace() only takes effect if both
    # imports resolve to concrete strings at synth time; with opaque
    # cross-stack tokens, build the wildcard ARN from pseudo parameters
    # instead.
    permission = lambda_.CfnPermission(
        self, "aliasPermission",
        action="lambda:InvokeFunction",
        function_name=alias.ref,
        principal="apigateway.amazonaws.com",
        source_arn=METHOD_ARN.replace(STAGE_NAME, "*"))

    # Create a canary deployment that shifts 50% of prod traffic to the alias
    canary_deployment = apigateway.CfnDeployment(
        self, "CanaryDeployment",
        rest_api_id=API_ID,
        deployment_canary_settings=apigateway.CfnDeployment.DeploymentCanarySettingsProperty(
            percent_traffic=50,
            stage_variable_overrides={"lambdaAlias": "Dev"}),
        stage_name="prod")
def __init__(
    self,
    scope: core.Construct,
    id: str,
    map_params: dict,
    **kwargs,
):  # pylint: disable=W0622
    super().__init__(scope, id, **kwargs)
    LOGGER.debug('Notification configuration required for %s', map_params['name'])
    stack = core.Stack.of(self)
    # pylint: disable=no-value-for-parameter
    _slack_func = _lambda.Function.from_function_arn(
        self,
        'slack_lambda_function',
        f'arn:{stack.partition}:lambda:{ADF_DEPLOYMENT_REGION}:'
        f'{ADF_DEPLOYMENT_ACCOUNT_ID}:function:SendSlackNotification'
    )
    kms_alias = _kms.Alias.from_alias_name(
        self,
        "KMSAlias",
        f"alias/codepipeline-{ADF_DEPLOYMENT_ACCOUNT_ID}",
    )
    _topic = _sns.Topic(self, "PipelineTopic", master_key=kms_alias)
    _statement = _iam.PolicyStatement(
        actions=["sns:Publish"],
        effect=_iam.Effect.ALLOW,
        principals=[
            _iam.ServicePrincipal("sns.amazonaws.com"),
            _iam.ServicePrincipal("codecommit.amazonaws.com"),
            _iam.ServicePrincipal("events.amazonaws.com"),
        ],
        resources=["*"],
    )
    _topic.add_to_resource_policy(_statement)
    _endpoint = map_params.get("params", {}).get("notification_endpoint", "")
    _sub = _sns.Subscription(
        self,
        "sns_subscription",
        topic=_topic,
        endpoint=_endpoint if "@" in _endpoint else _slack_func.function_arn,
        protocol=_sns.SubscriptionProtocol.EMAIL
        if "@" in _endpoint
        else _sns.SubscriptionProtocol.LAMBDA,
    )
    if "@" not in _endpoint:
        _lambda.CfnPermission(
            self,
            "slack_notification_sns_permissions",
            principal="sns.amazonaws.com",
            action="lambda:InvokeFunction",
            source_arn=_topic.topic_arn,
            function_name="SendSlackNotification",
        )
        _slack_func.add_event_source(source=_event_sources.SnsEventSource(_topic))
    self.topic_arn = _topic.topic_arn
def __init__(self, scope: core.Construct, id: str, map_params: dict, **kwargs):  # pylint: disable=W0622
    super().__init__(scope, id, **kwargs)
    LOGGER.debug('Notification configuration required for %s', map_params['name'])
    # pylint: disable=no-value-for-parameter
    _slack_func = _lambda.Function.from_function_arn(
        self,
        'slack_lambda_function',
        'arn:aws:lambda:{0}:{1}:function:SendSlackNotification'.format(
            ADF_DEPLOYMENT_REGION,
            ADF_DEPLOYMENT_ACCOUNT_ID))
    _topic = _sns.Topic(self, 'PipelineTopic')
    _statement = _iam.PolicyStatement(
        actions=["sns:Publish"],
        effect=_iam.Effect.ALLOW,
        principals=[
            _iam.ServicePrincipal('sns.amazonaws.com'),
            _iam.ServicePrincipal('codecommit.amazonaws.com'),
            _iam.ServicePrincipal('events.amazonaws.com')
        ],
        resources=["*"])
    _topic.add_to_resource_policy(_statement)
    _lambda.CfnPermission(
        self,
        'slack_notification_sns_permissions',
        principal='sns.amazonaws.com',
        action='lambda:InvokeFunction',
        source_arn=_topic.topic_arn,
        function_name='SendSlackNotification')
    _endpoint = map_params.get('params', {}).get('notification_endpoint', '')
    _sub = _sns.Subscription(
        self,
        'sns_subscription',
        topic=_topic,
        endpoint=_endpoint if '@' in _endpoint else _slack_func.function_arn,
        protocol=_sns.SubscriptionProtocol.EMAIL
        if '@' in _endpoint
        else _sns.SubscriptionProtocol.LAMBDA)
    if '@' not in _endpoint:
        _slack_func.add_event_source(
            source=_event_sources.SnsEventSource(_topic))
    self.topic_arn = _topic.topic_arn
def __init__(self, scope: core.Construct, id: str,
             props: KinesisFirehoseStackProps, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    lambda_repository = aws_codecommit.Repository(
        self,
        "ClicksProcessingLambdaRepository",
        repository_name="MythicalMysfits-ClicksProcessingLambdaRepository",
    )

    core.CfnOutput(
        self,
        "kinesisRepositoryCloneUrlHttp",
        value=lambda_repository.repository_clone_url_http,
        description="Clicks Processing Lambda Repository Clone URL HTTP",
    )

    core.CfnOutput(
        self,
        "kinesisRepositoryCloneUrlSsh",
        value=lambda_repository.repository_clone_url_ssh,
        description="Clicks Processing Lambda Repository Clone URL SSH",
    )

    clicks_destination_bucket = aws_s3.Bucket(self, "Bucket", versioned=True)

    lambda_function_policy = aws_iam.PolicyStatement()
    lambda_function_policy.add_actions("dynamodb:GetItem")
    lambda_function_policy.add_resources(props.table.table_arn)

    mysfits_clicks_processor = aws_lambda.Function(
        self,
        "Function",
        handler="streamProcessor.processRecord",
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        description=("An Amazon Kinesis Firehose stream processor that "
                     "enriches click records to not just include a mysfitId, "
                     "but also other attributes that can be analyzed later."),
        memory_size=128,
        code=aws_lambda.Code.asset("../../lambda-streaming-processor"),
        timeout=core.Duration.seconds(30),
        initial_policy=[lambda_function_policy],
        environment={
            # TODO: this seems better than having the user copy/paste it in,
            # but is it the best way?
            "MYSFITS_API_URL": "https://{}.execute-api.{}.amazonaws.com/prod/".format(
                props.api_gateway.ref, core.Aws.REGION)
        },
    )

    firehose_delivery_role = aws_iam.Role(
        self,
        "FirehoseDeliveryRole",
        role_name="FirehoseDeliveryRole",
        assumed_by=aws_iam.ServicePrincipal("firehose.amazonaws.com"),
        external_id=core.Aws.ACCOUNT_ID,
    )

    firehose_delivery_policy_s3_statement = aws_iam.PolicyStatement()
    firehose_delivery_policy_s3_statement.add_actions(
        "s3:AbortMultipartUpload",
        "s3:GetBucketLocation",
        "s3:GetObject",
        "s3:ListBucket",
        "s3:ListBucketMultipartUploads",
        "s3:PutObject",
    )
    firehose_delivery_policy_s3_statement.add_resources(
        clicks_destination_bucket.bucket_arn)
    firehose_delivery_policy_s3_statement.add_resources(
        clicks_destination_bucket.arn_for_objects("*"))

    firehose_delivery_policy_lambda_statement = aws_iam.PolicyStatement()
    firehose_delivery_policy_lambda_statement.add_actions("lambda:InvokeFunction")
    firehose_delivery_policy_lambda_statement.add_resources(
        mysfits_clicks_processor.function_arn)

    firehose_delivery_role.add_to_policy(firehose_delivery_policy_s3_statement)
    firehose_delivery_role.add_to_policy(firehose_delivery_policy_lambda_statement)

    mysfits_firehose_to_s3 = aws_kinesisfirehose.CfnDeliveryStream(
        self,
        "DeliveryStream",
        extended_s3_destination_configuration=aws_kinesisfirehose.CfnDeliveryStream.ExtendedS3DestinationConfigurationProperty(
            bucket_arn=clicks_destination_bucket.bucket_arn,
            buffering_hints=aws_kinesisfirehose.CfnDeliveryStream.BufferingHintsProperty(
                interval_in_seconds=60, size_in_m_bs=50),
            compression_format="UNCOMPRESSED",
            prefix="firehose/",
            role_arn=firehose_delivery_role.role_arn,
            processing_configuration=aws_kinesisfirehose.CfnDeliveryStream.ProcessingConfigurationProperty(
                enabled=True,
                processors=[
                    aws_kinesisfirehose.CfnDeliveryStream.ProcessorProperty(
                        parameters=[
                            aws_kinesisfirehose.CfnDeliveryStream.ProcessorParameterProperty(
                                parameter_name="LambdaArn",
                                parameter_value=mysfits_clicks_processor.function_arn,
                            )
                        ],
                        type="Lambda",
                    )
                ],
            ),
        ),
    )

    aws_lambda.CfnPermission(
        self,
        "Permission",
        action="lambda:InvokeFunction",
        function_name=mysfits_clicks_processor.function_arn,
        principal="firehose.amazonaws.com",
        source_account=core.Aws.ACCOUNT_ID,
        source_arn=mysfits_firehose_to_s3.attr_arn,
    )

    click_processing_api_role = aws_iam.Role(
        self,
        "ClickProcessingApiRole",
        assumed_by=aws_iam.ServicePrincipal("apigateway.amazonaws.com"),
    )

    api_policy = aws_iam.PolicyStatement()
    api_policy.add_actions("firehose:PutRecord")
    api_policy.add_resources(mysfits_firehose_to_s3.attr_arn)
    aws_iam.Policy(
        self,
        "ClickProcessingApiPolicy",
        policy_name="api_gateway_firehose_proxy_role",
        statements=[api_policy],
        roles=[click_processing_api_role],
    )

    api = aws_apigateway.RestApi(
        self,
        "APIEndpoint",
        rest_api_name="ClickProcessing API Service",
        endpoint_types=[aws_apigateway.EndpointType.REGIONAL],
    )

    clicks = api.root.add_resource("clicks")

    clicks.add_method(
        "PUT",
        aws_apigateway.AwsIntegration(
            service="firehose",
            integration_http_method="POST",
            action="PutRecord",
            options=aws_apigateway.IntegrationOptions(
                connection_type=aws_apigateway.ConnectionType.INTERNET,
                credentials_role=click_processing_api_role,
                integration_responses=[
                    aws_apigateway.IntegrationResponse(
                        status_code="200",
                        response_templates={
                            "application/json": '{"status": "OK"}'
                        },
                        response_parameters={
                            "method.response.header.Access-Control-Allow-Headers": "'Content-Type'",
                            "method.response.header.Access-Control-Allow-Methods": "'OPTIONS,PUT'",
                            "method.response.header.Access-Control-Allow-Origin": "'*'",
                        },
                    )
                ],
                request_parameters={
                    "integration.request.header.Content-Type": "'application/x-amz-json-1.1'"
                },
                request_templates={
                    "application/json": """{ "DeliveryStreamName": "%s",
"Record": { "Data": "$util.base64Encode($input.json('$'))" }}"""
                    % mysfits_firehose_to_s3.ref
                },
            ),
        ),
        method_responses=[
            aws_apigateway.MethodResponse(
                status_code="200",
                response_parameters={
                    "method.response.header.Access-Control-Allow-Headers": True,
                    "method.response.header.Access-Control-Allow-Methods": True,
                    "method.response.header.Access-Control-Allow-Origin": True,
                },
            )
        ],
    )

    clicks.add_method(
        "OPTIONS",
        aws_apigateway.MockIntegration(
            integration_responses=[
                aws_apigateway.IntegrationResponse(
                    status_code="200",
                    response_parameters={
                        "method.response.header.Access-Control-Allow-Headers": "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,X-Amz-User-Agent'",
                        "method.response.header.Access-Control-Allow-Origin": "'*'",
                        "method.response.header.Access-Control-Allow-Credentials": "'false'",
                        "method.response.header.Access-Control-Allow-Methods": "'OPTIONS,GET,PUT,POST,DELETE'",
                    },
                )
            ],
            passthrough_behavior=aws_apigateway.PassthroughBehavior.NEVER,
            request_templates={"application/json": '{"statusCode": 200}'},
        ),
        method_responses=[
            aws_apigateway.MethodResponse(
                status_code="200",
                response_parameters={
                    "method.response.header.Access-Control-Allow-Headers": True,
                    "method.response.header.Access-Control-Allow-Methods": True,
                    "method.response.header.Access-Control-Allow-Credentials": True,
                    "method.response.header.Access-Control-Allow-Origin": True,
                },
            )
        ],
    )
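# --- Illustrative smoke test (not from the original source) ---
# Assumptions: CDK's RestApi deploys a default "prod" stage, the resource
# path is /clicks, and api_id/region are read from the deployed stack
# (both values below are placeholders).
import requests

api_id = "abc123"      # placeholder: deployed REST API id
region = "us-east-1"   # placeholder: deployment region
resp = requests.put(
    f"https://{api_id}.execute-api.{region}.amazonaws.com/prod/clicks",
    json={"mysfitId": "mysfit-1"},
)
print(resp.status_code, resp.text)  # expect: 200 {"status": "OK"}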
def _add_lambda_cleanup(self, policy_statements, build_tags):
    lambda_cleanup_execution_role = None
    if self.custom_cleanup_lambda_role:
        execution_role = self.custom_cleanup_lambda_role
    else:
        # LambdaCleanupPolicies
        self._add_resource_delete_policy(
            policy_statements,
            ["cloudformation:DeleteStack"],
            [
                self.format_arn(
                    service="cloudformation",
                    resource="stack",
                    resource_name="{0}/{1}".format(self.image_id, self._stack_unique_id()),
                )
            ],
        )
        self._add_resource_delete_policy(
            policy_statements,
            ["ec2:CreateTags"],
            [
                self.format_arn(
                    service="ec2",
                    account="",
                    resource="image",
                    region=region,
                    resource_name="*",
                )
                for region in self._get_distribution_regions()
            ],
        )
        self._add_resource_delete_policy(
            policy_statements,
            ["tag:TagResources"],
            ["*"],
        )
        self._add_resource_delete_policy(
            policy_statements,
            ["iam:DetachRolePolicy", "iam:DeleteRole", "iam:DeleteRolePolicy"],
            [
                self.format_arn(
                    service="iam",
                    resource="role",
                    region="",
                    resource_name="{0}/{1}".format(
                        IAM_ROLE_PATH.strip("/"),
                        self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX + "Cleanup"),
                    ),
                )
            ],
        )
        self._add_resource_delete_policy(
            policy_statements,
            ["lambda:DeleteFunction", "lambda:RemovePermission"],
            [
                self.format_arn(
                    service="lambda",
                    resource="function",
                    sep=":",
                    resource_name=self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX),
                )
            ],
        )
        self._add_resource_delete_policy(
            policy_statements,
            ["logs:DeleteLogGroup"],
            [
                self.format_arn(
                    service="logs",
                    resource="log-group",
                    sep=":",
                    resource_name="/aws/lambda/{0}:*".format(
                        self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX)
                    ),
                )
            ],
        )
        self._add_resource_delete_policy(
            policy_statements,
            ["iam:RemoveRoleFromInstanceProfile"],
            [
                self.format_arn(
                    service="iam",
                    resource="instance-profile",
                    region="",
                    resource_name="{0}/{1}".format(
                        IAM_ROLE_PATH.strip("/"),
                        self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX),
                    ),
                )
            ],
        )
        self._add_resource_delete_policy(
            policy_statements,
            ["iam:DetachRolePolicy", "iam:DeleteRolePolicy"],
            [
                self.format_arn(
                    service="iam",
                    resource="role",
                    region="",
                    resource_name="{0}/{1}".format(
                        IAM_ROLE_PATH.strip("/"),
                        self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX),
                    ),
                )
            ],
        )
        self._add_resource_delete_policy(
            policy_statements,
            ["SNS:GetTopicAttributes", "SNS:DeleteTopic", "SNS:Unsubscribe"],
            [
                self.format_arn(
                    service="sns",
                    resource="{0}".format(
                        self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX)
                    ),
                )
            ],
        )

        policy_document = iam.PolicyDocument(statements=policy_statements)
        managed_lambda_policy = [
            Fn.sub("arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"),
        ]

        # LambdaCleanupExecutionRole
        lambda_cleanup_execution_role = iam.CfnRole(
            self,
            "DeleteStackFunctionExecutionRole",
            managed_policy_arns=managed_lambda_policy,
            assume_role_policy_document=get_assume_role_policy_document("lambda.amazonaws.com"),
            path=IAM_ROLE_PATH,
            policies=[
                iam.CfnRole.PolicyProperty(
                    policy_document=policy_document,
                    policy_name="LambdaCleanupPolicy",
                ),
            ],
            tags=build_tags,
            role_name=self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX + "Cleanup"),
        )

        execution_role = lambda_cleanup_execution_role.attr_arn

    # LambdaCleanupEnv
    lambda_env = awslambda.CfnFunction.EnvironmentProperty(
        variables={"IMAGE_STACK_ARN": self.stack_id})

    # LambdaCWLogGroup
    lambda_log = logs.CfnLogGroup(
        self,
        "DeleteStackFunctionLog",
        log_group_name="/aws/lambda/{0}".format(
            self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX)),
    )

    # LambdaCleanupFunction
    lambda_cleanup = awslambda.CfnFunction(
        self,
        "DeleteStackFunction",
        function_name=self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX),
        code=awslambda.CfnFunction.CodeProperty(
            s3_bucket=self.config.custom_s3_bucket
            or S3Bucket.get_bucket_name(AWSApi.instance().sts.get_account_id(), get_region()),
            s3_key=self.bucket.get_object_key(S3FileType.CUSTOM_RESOURCES, "artifacts.zip"),
        ),
        handler="delete_image_stack.handler",
        memory_size=128,
        role=execution_role,
        runtime="python3.8",
        timeout=900,
        environment=lambda_env,
        tags=build_tags,
    )

    permission = awslambda.CfnPermission(
        self,
        "DeleteStackFunctionPermission",
        action="lambda:InvokeFunction",
        principal="sns.amazonaws.com",
        function_name=lambda_cleanup.attr_arn,
        source_arn=Fn.ref("BuildNotificationTopic"),
    )
    lambda_cleanup.add_depends_on(lambda_log)

    return lambda_cleanup, permission, lambda_cleanup_execution_role, lambda_log
def __init__(
        self,
        stack: core.Stack,
        prefix: str,
        vpc_parameters: VPCParameters,
        database: Union[aws_rds.CfnDBInstance, aws_rds.CfnDBCluster],
        kms_key: Optional[aws_kms.Key] = None
) -> None:
    """
    Constructor.

    :param stack: A stack in which resources should be created.
    :param prefix: A prefix to give for every resource.
    :param vpc_parameters: VPC parameters for resource (e.g. lambda rotation
    function) configuration.
    :param database: A database instance for which this secret should be applied.
    :param kms_key: Custom or managed KMS key for secret encryption.
    """
    super().__init__()

    # This template is sent to a lambda function that executes secret rotation.
    # If you choose to change this template, make sure you change the lambda
    # function source code too.
    template = {
        'engine': 'mysql',
        'host': database.attr_endpoint_address,
        'username': database.master_username,
        'password': database.master_user_password,
        'dbname': None,
        'port': 3306
    }

    # Instances and clusters have different attributes.
    if isinstance(database, aws_rds.CfnDBInstance):
        template['dbname'] = database.db_name
    elif isinstance(database, aws_rds.CfnDBCluster):
        template['dbname'] = database.database_name

    # Create a secret instance.
    self.secret = aws_secretsmanager.Secret(
        scope=stack,
        id=prefix + 'RdsSecret',
        description=f'A secret for {prefix}.',
        encryption_key=kms_key,
        generate_secret_string=SecretStringGenerator(
            generate_string_key='password',
            secret_string_template=json.dumps(template)),
        secret_name=prefix + 'RdsSecret')

    # Make sure the database is fully deployed and configured before creating
    # a secret for it.
    self.secret.node.add_dependency(database)

    # Create a lambda function for secret rotation.
    self.secret_rotation = SecretRotation(
        stack=stack,
        prefix=prefix,
        secret=self.secret,
        kms_key=kms_key,
        vpc_parameters=vpc_parameters,
        database=database)

    # Make sure secrets manager can invoke this lambda function.
    self.sm_invoke_permission = aws_lambda.CfnPermission(
        scope=stack,
        id=prefix + 'SecretsManagerInvokePermission',
        action='lambda:InvokeFunction',
        function_name=self.secret_rotation.rotation_lambda_function.function_name,
        principal="secretsmanager.amazonaws.com",
    )

    # Make sure the lambda function is created before making its permissions.
    self.sm_invoke_permission.node.add_dependency(
        self.secret_rotation.rotation_lambda_function)

    # Apply rotation for the secret instance.
    self.rotation_schedule = aws_secretsmanager.RotationSchedule(
        scope=stack,
        id=prefix + 'RotationSchedule',
        secret=self.secret,
        rotation_lambda=self.secret_rotation.rotation_lambda_function,
        automatically_after=core.Duration.days(30))

    # Make sure invoke permission for secrets manager is created before
    # creating a schedule.
    self.rotation_schedule.node.add_dependency(self.sm_invoke_permission)

    # Instances and clusters have different arns. Build the ARN from the
    # stack's own region and account so it is valid wherever the stack is
    # deployed.
    if isinstance(database, aws_rds.CfnDBInstance):
        assert database.db_instance_identifier, 'Instance identifier must be specified.'
        target_arn = f'arn:aws:rds:{stack.region}:{stack.account}:db:{database.db_instance_identifier}'
    elif isinstance(database, aws_rds.CfnDBCluster):
        assert database.db_cluster_identifier, 'Cluster identifier must be specified.'
        target_arn = f'arn:aws:rds:{stack.region}:{stack.account}:cluster:{database.db_cluster_identifier}'
    else:
        raise TypeError('Unsupported DB type.')

    # Instances and clusters should have different attachment types.
    if isinstance(database, aws_rds.CfnDBInstance):
        target_type = 'AWS::RDS::DBInstance'
    elif isinstance(database, aws_rds.CfnDBCluster):
        target_type = 'AWS::RDS::DBCluster'
    else:
        raise TypeError('Unsupported DB type.')

    # Attach the secret instance to the desired database.
    self.target_db_attachment = aws_secretsmanager.CfnSecretTargetAttachment(
        scope=stack,
        id=prefix + 'TargetRdsAttachment',
        secret_id=self.secret.secret_arn,
        target_id=target_arn,
        target_type=target_type)
def __init__(self, scope: core.Construct, id: str, data, iam_vars) -> None:
    super().__init__(scope, id)

    # VPC
    vpc = ec2.CfnVPC(self, "cdk-vpc", cidr_block=data["vpc"])
    igw = ec2.CfnInternetGateway(self, id="igw")
    ec2.CfnVPCGatewayAttachment(self,
                                id="igw-attach",
                                vpc_id=vpc.ref,
                                internet_gateway_id=igw.ref)
    public_route_table = ec2.CfnRouteTable(self,
                                           id="public_route_table",
                                           vpc_id=vpc.ref)
    ec2.CfnRoute(self,
                 id="public_route",
                 route_table_id=public_route_table.ref,
                 destination_cidr_block="0.0.0.0/0",
                 gateway_id=igw.ref)
    public_subnets = []
    for i, s in enumerate(data["subnets"]["public"]):
        subnet = ec2.CfnSubnet(self,
                               id="public_{}".format(s),
                               cidr_block=s,
                               vpc_id=vpc.ref,
                               availability_zone=core.Fn.select(i, core.Fn.get_azs()),
                               map_public_ip_on_launch=True)
        public_subnets.append(subnet)
        ec2.CfnSubnetRouteTableAssociation(
            self,
            id="public_{}_association".format(s),
            route_table_id=public_route_table.ref,
            subnet_id=subnet.ref)
    eip = ec2.CfnEIP(self, id="natip")
    nat = ec2.CfnNatGateway(self,
                            id="nat",
                            allocation_id=eip.attr_allocation_id,
                            subnet_id=public_subnets[0].ref)
    private_route_table = ec2.CfnRouteTable(self,
                                            id="private_route_table",
                                            vpc_id=vpc.ref)
    ec2.CfnRoute(self,
                 id="private_route",
                 route_table_id=private_route_table.ref,
                 destination_cidr_block="0.0.0.0/0",
                 nat_gateway_id=nat.ref)
    private_subnets = []
    for i, s in enumerate(data["subnets"]["private"]):
        subnet = ec2.CfnSubnet(self,
                               id="private_{}".format(s),
                               cidr_block=s,
                               vpc_id=vpc.ref,
                               availability_zone=core.Fn.select(i, core.Fn.get_azs()),
                               map_public_ip_on_launch=False)
        private_subnets.append(subnet)
        ec2.CfnSubnetRouteTableAssociation(
            self,
            id="private_{}_association".format(s),
            route_table_id=private_route_table.ref,
            subnet_id=subnet.ref)

    # Security groups
    lb_sg = ec2.CfnSecurityGroup(self,
                                 id="lb",
                                 group_description="LB SG",
                                 vpc_id=vpc.ref)
    lambda_sg = ec2.CfnSecurityGroup(self,
                                     id="lambda",
                                     group_description="Lambda SG",
                                     vpc_id=vpc.ref)
    public_prefix = ec2.CfnPrefixList(self,
                                      id="cidr_prefix",
                                      address_family="IPv4",
                                      max_entries=1,
                                      prefix_list_name="public",
                                      entries=[{
                                          "cidr": "0.0.0.0/0",
                                          "description": "Public"
                                      }])
    _sg_rules = [{
        'sg': lb_sg.attr_group_id,
        'rules': [{
            "direction": "ingress",
            "description": "HTTP from Internet",
            "from_port": 80,
            "to_port": 80,
            "protocol": "tcp",
            "cidr_blocks": public_prefix.ref
        }, {
            "direction": "egress",
            "description": "LB to Lambda",
            "from_port": 80,
            "to_port": 80,
            "protocol": "tcp",
            "source_security_group_id": lambda_sg.attr_group_id
        }]
    }, {
        "sg": lambda_sg.attr_group_id,
        "rules": [{
            "direction": "ingress",
            "description": "HTTP from LB",
            "from_port": 80,
            "to_port": 80,
            "protocol": "tcp",
            "source_security_group_id": lb_sg.attr_group_id
        }, {
            "direction": "egress",
            "description": "All to Internet",
            "from_port": 0,
            "to_port": 65535,
            "protocol": "tcp",
            "cidr_blocks": public_prefix.ref
        }]
    }]
    for ruleset in _sg_rules:
        for rule in ruleset["rules"]:
            if rule["direction"] == "ingress":
                ec2.CfnSecurityGroupIngress(
                    self,
                    id=rule["description"].replace(" ", "_"),
                    description=rule["description"],
                    to_port=rule["to_port"],
                    from_port=rule["from_port"],
                    ip_protocol=rule["protocol"],
                    group_id=ruleset["sg"],
                    source_prefix_list_id=rule["cidr_blocks"]
                    if "cidr_blocks" in rule else None,
                    source_security_group_id=rule["source_security_group_id"]
                    if "source_security_group_id" in rule else None)
            else:
                ec2.CfnSecurityGroupEgress(
                    self,
                    id=rule["description"].replace(" ", "_"),
                    description=rule["description"],
                    to_port=rule["to_port"],
                    from_port=rule["from_port"],
                    ip_protocol=rule["protocol"],
                    group_id=ruleset["sg"],
                    destination_prefix_list_id=rule["cidr_blocks"]
                    if "cidr_blocks" in rule else None,
                    destination_security_group_id=rule["source_security_group_id"]
                    if "source_security_group_id" in rule else None)

    # IAM
    assume_policy_doc = iam.PolicyDocument()
    for statement in iam_vars["assume"]["Statement"]:
        _statement = iam.PolicyStatement(actions=[statement["Action"]])
        _statement.add_service_principal(statement["Principal"]["Service"])
        assume_policy_doc.add_statements(_statement)
    role = iam.CfnRole(self,
                       id="iam_role",
                       path="/",
                       assume_role_policy_document=assume_policy_doc)
    role_policy_doc = iam.PolicyDocument()
    for statement in iam_vars["policy"]["Statement"]:
        _statement = iam.PolicyStatement(actions=statement["Action"],
                                         resources=["*"])
        role_policy_doc.add_statements(_statement)
    policy = iam.CfnPolicy(self,
                           id="iam_policy",
                           policy_document=role_policy_doc,
                           policy_name="cdkPolicy",
                           roles=[role.ref])

    # Lambda
    shutil.make_archive("../lambda", 'zip', "../lambda/")
    s3_client = boto3.client('s3')
    s3_client.upload_file("../lambda.zip", "cloudevescops-zdays-demo", "cdk.zip")
    function = lmd.CfnFunction(self,
                               id="lambda_function",
                               handler="lambda.lambda_handler",
                               role=role.attr_arn,
                               runtime="python3.7",
                               code={
                                   "s3Bucket": "cloudevescops-zdays-demo",
                                   "s3Key": "cdk.zip"
                               },
                               vpc_config={
                                   "securityGroupIds": [lambda_sg.ref],
                                   "subnetIds": [s.ref for s in private_subnets]
                               },
                               environment={"variables": {
                                   "TOOL": "CDK"
                               }})

    # LB
    lb = alb.CfnLoadBalancer(self,
                             id="alb",
                             name="lb-cdk",
                             scheme="internet-facing",
                             type="application",
                             subnets=[s.ref for s in public_subnets],
                             security_groups=[lb_sg.ref])
    permission = lmd.CfnPermission(self,
                                   id="lambda_permis",
                                   action="lambda:InvokeFunction",
                                   function_name=function.ref,
                                   principal="elasticloadbalancing.amazonaws.com")
    tg = alb.CfnTargetGroup(self,
                            id="alb_tg",
                            name="lambda-cdk",
                            target_type="lambda",
                            health_check_enabled=True,
                            health_check_interval_seconds=40,
                            health_check_path="/",
                            health_check_timeout_seconds=30,
                            targets=[{
                                "id": function.get_att("Arn").to_string()
                            }],
                            matcher={"httpCode": "200"})
    # ELB verifies it can invoke the function when the lambda target is
    # registered, so the target group must wait for the permission above.
    tg.add_depends_on(permission)
    alb.CfnListener(self,
                    id="listener",
                    default_actions=[{
                        "type": "forward",
                        "targetGroupArn": tg.ref
                    }],
                    load_balancer_arn=lb.ref,
                    port=80,
                    protocol="HTTP")
    core.CfnOutput(self, id="fqdn", value=lb.attr_dns_name)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    brand = 'a'
    stage = 'dev'
    tablename = 'webchat'

    connectionstable = dynamodb.Table(
        self,
        'connectionsTable',
        billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
        removal_policy=core.RemovalPolicy.DESTROY,
        table_name=tablename,
        partition_key=dynamodb.Attribute(name="connectionId",
                                         type=dynamodb.AttributeType.STRING),
    )

    websocketgw = apiv2.CfnApi(
        self,
        'websocket',
        name='SimpleChatWebSocket',
        protocol_type='WEBSOCKET',
        route_selection_expression='$request.body.action')

    # connect function
    connect_function = py_lambda.PythonFunction(
        self,
        "connect_function",
        entry='websocket/api_lambda/connect',  # folder
        index='connect.py',  # file
        handler='lambda_handler',  # function
        description='connect',
        environment={
            'brand': brand,
            'stage': stage,
            'CONNECTION_TABLE_NAME': tablename
        },
        timeout=core.Duration.seconds(60))
    connectionstable.grant_read_write_data(connect_function)
    connect_function_policy = iam.Policy(
        self,
        'connect_policy',
        statements=[
            iam.PolicyStatement(actions=['dynamodb:*'],
                                resources=[connectionstable.table_arn])
        ],
        roles=[connect_function.role])
    connect_function_permission = aws_lambda.CfnPermission(
        self,
        'connectFunctionPermission',
        action='lambda:InvokeFunction',
        function_name=connect_function.function_name,
        principal='apigateway.amazonaws.com')
    connect_function_permission.add_depends_on(websocketgw)

    # disconnect function
    disconnect_function = py_lambda.PythonFunction(
        self,
        "disconnect_function",
        entry='websocket/api_lambda/disconnect',  # folder
        index='disconnect.py',  # file
        handler='lambda_handler',  # function
        description='disconnect',
        environment={
            'brand': brand,
            'stage': stage,
            'CONNECTION_TABLE_NAME': tablename
        },
        timeout=core.Duration.seconds(60))
    disconnect_function_policy = iam.Policy(
        self,
        'disconnect_policy',
        statements=[
            iam.PolicyStatement(actions=['dynamodb:*'],
                                resources=[connectionstable.table_arn])
        ],
        roles=[disconnect_function.role])
    disconnect_function_permission = aws_lambda.CfnPermission(
        self,
        'disconnectFunctionPermission',
        action='lambda:InvokeFunction',
        function_name=disconnect_function.function_name,
        principal='apigateway.amazonaws.com')
    connectionstable.grant_read_write_data(disconnect_function)
    disconnect_function_permission.add_depends_on(websocketgw)

    # send message function
    sendmessage_function = py_lambda.PythonFunction(
        self,
        "sendmessage_function",
        entry='websocket/api_lambda/sendmessage',  # folder
        index='sendmessage.py',  # file
        handler='lambda_handler',  # function
        description='sendmessage',
        environment={
            'brand': brand,
            'stage': stage,
            'CONNECTION_TABLE_NAME': tablename
        },
        timeout=core.Duration.seconds(60))
    connectionstable.grant_read_write_data(sendmessage_function)
    sendmessage_function_policy = iam.Policy(
        self,
        'sendmessage_policy',
        statements=[
            iam.PolicyStatement(actions=['dynamodb:*'],
                                resources=[connectionstable.table_arn]),
            iam.PolicyStatement(
                actions=['execute-api:ManageConnections'],
                resources=[
                    f'arn:aws:execute-api:{self.region}:{self.account}:{websocketgw.ref}/*',
                    f'arn:aws:execute-api:{self.region}:{self.account}:{websocketgw.ref}/prod/POST/@connections/*'
                ],
            ),
        ],
        roles=[sendmessage_function.role])
    sendmessage_function_permission = aws_lambda.CfnPermission(
        self,
        'sendmessageFunctionPermission',
        action='lambda:InvokeFunction',
        function_name=sendmessage_function.function_name,
        principal='apigateway.amazonaws.com')
    sendmessage_function_permission.add_depends_on(websocketgw)

    # set username function
    setusername_function = py_lambda.PythonFunction(
        self,
        "setusername_function",
        entry='websocket/api_lambda/setusername',  # folder
        index='setusername.py',  # file
        handler='lambda_handler',  # function
        description='setusername',
        environment={
            'brand': brand,
            'stage': stage,
            'CONNECTION_TABLE_NAME': tablename
        },
        timeout=core.Duration.seconds(60))
    connectionstable.grant_read_write_data(setusername_function)
    setusername_function_policy = iam.Policy(
        self,
        'setusername_policy',
        statements=[
            iam.PolicyStatement(actions=['dynamodb:*'],
                                resources=[connectionstable.table_arn]),
            iam.PolicyStatement(
                actions=['execute-api:ManageConnections'],
                resources=[
                    f'arn:aws:execute-api:{self.region}:{self.account}:{websocketgw.ref}/*',
                    f'arn:aws:execute-api:{self.region}:{self.account}:{websocketgw.ref}/prod/POST/@connections/*'
                ],
            ),
        ],
        roles=[setusername_function.role])
    setusername_function_permission = aws_lambda.CfnPermission(
        self,
        'setusernameFunctionPermission',
        action='lambda:InvokeFunction',
        function_name=setusername_function.function_name,
        principal='apigateway.amazonaws.com')
    setusername_function_permission.add_depends_on(websocketgw)

    # Connect route
    connect_integration = apiv2.CfnIntegration(
        self,
        'ConnectIntegration',
        api_id=websocketgw.ref,
        description='Connect Integration',
        integration_type='AWS_PROXY',
        integration_uri=f'arn:aws:apigateway:{self.region}:lambda:path/2015-03-31/functions/{connect_function.function_arn}/invocations')
    connect_route = apiv2.CfnRoute(self,
                                   'connectRoute',
                                   api_id=websocketgw.ref,
                                   route_key='$connect',
                                   authorization_type='NONE',
                                   operation_name='ConnectRoute',
                                   target='integrations/' + connect_integration.ref)

    # Disconnect route
    disconnect_integration = apiv2.CfnIntegration(
        self,
        'disConnectIntegration',
        api_id=websocketgw.ref,
        description='disConnect Integration',
        integration_type='AWS_PROXY',
        integration_uri=f'arn:aws:apigateway:{self.region}:lambda:path/2015-03-31/functions/{disconnect_function.function_arn}/invocations')
    disconnect_route = apiv2.CfnRoute(self,
                                      'disconnectRoute',
                                      api_id=websocketgw.ref,
                                      route_key='$disconnect',
                                      authorization_type='NONE',
                                      operation_name='DisconnectRoute',
                                      target='integrations/' + disconnect_integration.ref)

    # Send route
    sendmessage_integration = apiv2.CfnIntegration(
        self,
        'sendMessageIntegration',
        api_id=websocketgw.ref,
        description='sendmessage Integration',
        integration_type='AWS_PROXY',
        integration_uri=f'arn:aws:apigateway:{self.region}:lambda:path/2015-03-31/functions/{sendmessage_function.function_arn}/invocations')
    sendmessage_route = apiv2.CfnRoute(self,
                                       'sendRoute',
                                       api_id=websocketgw.ref,
                                       route_key='sendmessage',
                                       authorization_type='NONE',
                                       operation_name='SendRoute',
                                       target='integrations/' + sendmessage_integration.ref)

    # Set username route
    setusername_integration = apiv2.CfnIntegration(
        self,
        'setUsernameIntegration',
        api_id=websocketgw.ref,
        description='setusername Integration',
        integration_type='AWS_PROXY',
        integration_uri=f'arn:aws:apigateway:{self.region}:lambda:path/2015-03-31/functions/{setusername_function.function_arn}/invocations')
    setusername_route = apiv2.CfnRoute(self,
                                       'setUsernameRoute',
                                       api_id=websocketgw.ref,
                                       route_key='setusername',
                                       authorization_type='NONE',
                                       operation_name='SetUsernameRoute',
                                       target='integrations/' + setusername_integration.ref)

    deployment = apiv2.CfnDeployment(
        self,
        'Deployment',
        api_id=websocketgw.ref,
    )
    deployment.add_depends_on(sendmessage_route)
    deployment.add_depends_on(setusername_route)
    deployment.add_depends_on(connect_route)
    deployment.add_depends_on(disconnect_route)

    prod_stage = apiv2.CfnStage(
        self,
        'stage',
        stage_name='prod',
        description='prod stage',
        # Attach the deployment; without this the routes above are never
        # served from the prod stage.
        deployment_id=deployment.ref,
        api_id=websocketgw.ref,
    )

    core.CfnOutput(
        self,
        'WebSocketURI',
        value=f'wss://{websocketgw.ref}.execute-api.{self.region}.amazonaws.com/prod',
        description='URI of websocket')
    print('WebSocket')
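# --- Hypothetical client check (not from the original source) ---
# Assumptions: the stack is deployed, the WebSocketURI output value is
# substituted below, and the third-party `websockets` package is installed.
import asyncio
import json

import websockets


async def smoke_test(uri: str) -> None:
    async with websockets.connect(uri) as ws:
        # The message body matches the API's route_selection_expression
        # ($request.body.action), so this hits the "sendmessage" route.
        await ws.send(json.dumps({"action": "sendmessage", "data": "hello"}))
        print(await ws.recv())


asyncio.run(smoke_test("wss://<api-id>.execute-api.<region>.amazonaws.com/prod"))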
def __init__(self, scope: core.Construct, construct_id: str,
             stack_log_level: str, src_stream, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    # Create an S3 bucket for storing streaming data events from Firehose
    fh_data_store = _s3.Bucket(self,
                               "fhDataStore",
                               removal_policy=core.RemovalPolicy.DESTROY,
                               auto_delete_objects=False)

    firehose_delivery_stream_name = "phi_data_filter"

    # Firehose Lambda transformer: read the Lambda source code
    try:
        with open(
                "sensitive_data_filter_instream/stacks/back_end/firehose_transformation_stack/lambda_src/kinesis_firehose_transformer.py",
                encoding="utf-8",
                mode="r") as f:
            fh_transformer_fn_code = f.read()
    except OSError:
        print("Unable to read Lambda Function Code")
        raise

    fh_transformer_fn = _lambda.Function(
        self,
        "fhDataTransformerFn",
        function_name="fh_data_transformer",
        description="Transform incoming data events with newline character",
        runtime=_lambda.Runtime.PYTHON_3_7,
        code=_lambda.InlineCode(fh_transformer_fn_code),
        handler="index.lambda_handler",
        timeout=core.Duration.seconds(60),
        reserved_concurrent_executions=1,
        environment={
            "LOG_LEVEL": "INFO",
            "APP_ENV": "Production",
        })

    # Create a custom log group for the transformer
    fh_transformer_fn_lg = _logs.LogGroup(
        self,
        "fhDataTransformerFnLogGroup",
        log_group_name=f"/aws/lambda/{fh_transformer_fn.function_name}",
        removal_policy=core.RemovalPolicy.DESTROY,
        retention=_logs.RetentionDays.ONE_DAY)

    fh_delivery_role = _iam.Role(
        self,
        "fhDeliveryRole",
        # role_name="FirehoseDeliveryRole",
        assumed_by=_iam.ServicePrincipal("firehose.amazonaws.com"),
        external_id=core.Aws.ACCOUNT_ID,
    )

    # Allow Kinesis Firehose to write to S3
    roleStmt1 = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        resources=[
            fh_data_store.bucket_arn,
            f"{fh_data_store.bucket_arn}/*"
        ],
        actions=[
            "s3:AbortMultipartUpload", "s3:GetBucketLocation", "s3:GetObject",
            "s3:ListBucket", "s3:ListBucketMultipartUploads", "s3:PutObject"
        ])
    # roleStmt1.add_resources(
    #     fh_data_store.arn_for_objects("*")
    # )
    roleStmt1.sid = "AllowKinesisToWriteToS3"
    fh_delivery_role.add_to_policy(roleStmt1)

    # Allow Kinesis Firehose to write to CloudWatch Logs
    roleStmt2 = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        resources=[
            f"arn:aws:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:log-group:/aws/kinesisfirehose/{firehose_delivery_stream_name}:log-stream:*"
        ],
        actions=["logs:PutLogEvents"])
    roleStmt2.sid = "AllowKinesisToWriteToCloudWatch"
    fh_delivery_role.add_to_policy(roleStmt2)

    # Allow Kinesis Firehose to invoke the Lambda transformer
    roleStmt3 = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        resources=[fh_transformer_fn.function_arn],
        actions=["lambda:InvokeFunction"])
    roleStmt3.sid = "AllowKinesisToInvokeLambda"
    fh_delivery_role.add_to_policy(roleStmt3)

    # Allow Kinesis Firehose to read from the Kinesis data stream
    policy_to_allow_fh_to_read_stream = _iam.Policy(
        self,
        "allowKinesisFhToReadKinesisDataStream",
        roles=[fh_delivery_role],
        statements=[
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                resources=[src_stream.stream_arn],
                sid="AllowKinesisFhToReadKinesisDataStream",
                actions=[
                    "kinesis:DescribeStream", "kinesis:GetShardIterator",
                    "kinesis:GetRecords", "kinesis:ListShards"
                ])
        ])

    self.fh_to_s3 = _kinesis_fh.CfnDeliveryStream(
        self,
        "fhDeliveryStream",
        delivery_stream_name=firehose_delivery_stream_name,
        delivery_stream_type="KinesisStreamAsSource",
        kinesis_stream_source_configuration=_kinesis_fh.CfnDeliveryStream.KinesisStreamSourceConfigurationProperty(
            kinesis_stream_arn=src_stream.stream_arn,
            role_arn=fh_delivery_role.role_arn),
        extended_s3_destination_configuration=_kinesis_fh.CfnDeliveryStream.ExtendedS3DestinationConfigurationProperty(
            bucket_arn=fh_data_store.bucket_arn,
            buffering_hints=_kinesis_fh.CfnDeliveryStream.BufferingHintsProperty(
                interval_in_seconds=60, size_in_m_bs=1),
            compression_format="UNCOMPRESSED",
            prefix="phi-data/",
            # prefix="phi-data/date=!{timestamp:yyyy}-!{timestamp:MM}-!{timestamp:dd}/",
            role_arn=fh_delivery_role.role_arn,
            processing_configuration=_kinesis_fh.CfnDeliveryStream.ProcessingConfigurationProperty(
                enabled=True,
                processors=[
                    _kinesis_fh.CfnDeliveryStream.ProcessorProperty(
                        parameters=[
                            _kinesis_fh.CfnDeliveryStream.ProcessorParameterProperty(
                                parameter_name="LambdaArn",
                                parameter_value=fh_transformer_fn.function_arn,
                            )
                        ],
                        type="Lambda",
                    )
                ]),
        ),
    )
    self.fh_to_s3.add_depends_on(
        policy_to_allow_fh_to_read_stream.node.default_child)

    # Restrict the transformer Lambda to invocation by Firehose from the
    # stack owner account only
    _lambda.CfnPermission(
        self,
        "restrictLambdaInvocationToFhInOwnAccount",
        action="lambda:InvokeFunction",
        function_name=fh_transformer_fn.function_arn,
        principal="firehose.amazonaws.com",
        source_account=core.Aws.ACCOUNT_ID,
        source_arn=self.fh_to_s3.attr_arn,
    )

    ###########################################
    ################# OUTPUTS #################
    ###########################################
    output_0 = core.CfnOutput(
        self,
        "AutomationFrom",
        value=f"{GlobalArgs.SOURCE_INFO}",
        description="To know more about this automation stack, check out our github page.")
    output_1 = core.CfnOutput(
        self,
        "FirehoseArn",
        value=f"https://console.aws.amazon.com/firehose/home?region={core.Aws.REGION}#/details/{self.fh_to_s3.delivery_stream_name}",
        description="Produce streaming data events and push to Kinesis stream.")
    output_2 = core.CfnOutput(
        self,
        "FirehoseDataStore",
        value=f"https://console.aws.amazon.com/s3/buckets/{fh_data_store.bucket_name}",
        description="The firehose datastore bucket")
    output_3 = core.CfnOutput(
        self,
        "SensitiveDataFilter",
        value=f"https://console.aws.amazon.com/lambda/home?region={core.Aws.REGION}#/functions/{fh_transformer_fn.function_name}",
        description="Filter Sensitive data from event.")
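# --- Hypothetical test producer (not from the original source) ---
# Assumptions: the source Kinesis stream's name is known (placeholder below)
# and boto3 credentials are configured. Records put here flow through the
# Firehose transformer above and land in the S3 data store.
import json

import boto3

kinesis = boto3.client("kinesis")
kinesis.put_record(
    StreamName="my-src-stream",  # placeholder: src_stream's stream name
    Data=json.dumps({"name": "Jane Doe", "ssn": "123-45-6789"}),
    PartitionKey="pk-1",
)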
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Set Parameters
    db_password_parameters = core.CfnParameter(
        self,
        "DBPassword",
        no_echo=True,
        description="New account and RDS password",
        min_length=1,
        max_length=41,
        constraint_description="the password must be between 1 and 41 characters",
        default="DBPassword")

    # LambdaExecutionRole
    LambdaExecutionRole = iam.CfnRole(
        self,
        "LabelsLambdaExecutionRole",
        assume_role_policy_document={
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Principal": {
                    "Service": ["lambda.amazonaws.com"]
                },
                "Action": ["sts:AssumeRole"]
            }]
        },
        managed_policy_arns=[
            "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole",
            "arn:aws:iam::aws:policy/AmazonRekognitionReadOnlyAccess",
            "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess",
            "arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess"
        ],
        policies=[{
            "policyName": "root",
            "policyDocument": {
                "Version": "2012-10-17",
                "Statement": [
                    {
                        "Effect": "Allow",
                        "Action": ["logs:*"],
                        "Resource": "arn:aws:logs:*:*:*"
                    },
                ]
            }
        }])

    # S3 Bucket
    source_bucket = "sourcebucketname%s" % (core.Aws.ACCOUNT_ID)

    # LabelsLambda
    LabelsLambda = lambda_.CfnFunction(
        self,
        "LabelsLambda",
        handler="lambda_function.lambda_handler",
        role=LambdaExecutionRole.attr_arn,
        code={
            "s3Bucket": source_bucket,
            "s3Key": "lambda.zip"
        },
        runtime="python3.6",
        timeout=120,
        tracing_config={"mode": "Active"},
        vpc_config={
            "securityGroupIds": [core.Fn.import_value("LambdaSecurityGroupOutput")],
            "subnetIds": [
                core.Fn.import_value("PrivateSubnet1"),
                core.Fn.import_value("PrivateSubnet2")
            ]
        },
        environment={
            "variables": {
                "DATABASE_HOST": core.Fn.import_value("MyDBEndpoint"),
                "DATABASE_USER": "******",
                "DATABASE_PASSWORD": db_password_parameters.value_as_string,
                "DATABASE_DB_NAME": "Photos"
            }
        })

    # UploadQueue
    upload_queue = sqs.CfnQueue(self,
                                "UploadQueue",
                                queue_name="uploads-queue",
                                message_retention_period=12800,
                                visibility_timeout=300)

    # UploadSNSTopic
    upload_sns_topic = sns.CfnTopic(
        self,
        "UploadSNSTopic",
        display_name="uploads-topic",
        subscription=[{
            "endpoint": upload_queue.attr_arn,
            "protocol": "sqs"
        }, {
            "endpoint": LabelsLambda.attr_arn,
            "protocol": "lambda"
        }],
    )

    # QueuePolicy
    queue_policy = sqs.CfnQueuePolicy(
        self,
        "QueuePolicy",
        queues=[upload_queue.ref],
        policy_document={
            "Version": "2012-10-17",
            "Id": "QueuePolicy",
            "Statement": [{
                "Sid": "Allow-SendMessage-To-Queues-From-SNS-Topic",
                "Effect": "Allow",
                "Principal": "*",
                "Action": ["SQS:SendMessage"],
                "Resource": "*",
                "Condition": {
                    "ArnEquals": {
                        "aws:SourceArn": upload_sns_topic.ref
                    }
                }
            }]
        })

    # UploadTopicPolicy. Condition values must resolve to literal strings;
    # YAML short forms such as "!Sub '${AWS::AccountId}'" are not interpreted
    # inside a Python dict, so the Aws pseudo parameters are used here.
    upload_topic_policy = sns.CfnTopicPolicy(
        self,
        "UploadTopicPolicy",
        policy_document={
            "Version": "2012-10-17",
            "Id": "QueuePolicy",
            "Statement": [{
                "Sid": "Allow-S3-Publish",
                "Effect": "Allow",
                "Principal": {
                    "Service": "s3.amazonaws.com"
                },
                "Action": ["SNS:Publish"],
                "Resource": upload_sns_topic.ref,
                "Condition": {
                    "StringEquals": {
                        "aws:SourceAccount": core.Aws.ACCOUNT_ID
                    },
                    "ArnLike": {
                        "aws:SourceArn": "arn:aws:s3:*:*:imagebucketsns%s" % (core.Aws.ACCOUNT_ID)
                    }
                },
            }]
        },
        topics=[upload_sns_topic.ref])

    # ImageS3Bucket
    image_s3_bucket = s3.CfnBucket(
        self,
        "ImageS3Bucket",
        bucket_name="imagebucketsns%s" % (core.Aws.ACCOUNT_ID),
        notification_configuration={
            "topicConfigurations": [{
                "event": 's3:ObjectCreated:*',
                "topic": upload_sns_topic.ref
            }]
        })
    image_s3_bucket.add_depends_on(upload_topic_policy)
    image_s3_bucket.apply_removal_policy(core.RemovalPolicy.DESTROY)

    # ImageS3BucketPermission
    ImageS3BucketPermission = lambda_.CfnPermission(
        self,
        "ImageS3BucketPermission",
        action="lambda:InvokeFunction",
        function_name=LabelsLambda.attr_arn,
        principal="sns.amazonaws.com",
        source_arn=upload_sns_topic.ref)

    # Outputs
    core.CfnOutput(self,
                   "ImageS3BucketOutput",
                   value=image_s3_bucket.ref,
                   description="Image S3 Bucket",
                   export_name="ImageS3Bucket")
    core.CfnOutput(self,
                   "LabelsLambdaOutput",
                   value=LabelsLambda.ref,
                   description="Labels Lambda",
                   export_name="LabelsLambda")