def __init__(self, scope: core.Construct, id: str, **kwargs):
    super().__init__(scope, id, **kwargs)

    param = Parameters.instance()
    pipeline_name = param.getParameter('pipeline_name')
    connection_arn = param.getParameter('connection_arn')
    github_owner = param.getParameter('github_owner')
    github_repo = param.getParameter('github_repo')
    github_branch = param.getParameter('github_branch')
    secret_arn = param.getParameter('secret_arn')

    source_artifact = codepipeline.Artifact()
    cloud_assembly_artifact = codepipeline.Artifact()

    pipeline = pipelines.CdkPipeline(
        self, 'CdkPipeline',
        cloud_assembly_artifact=cloud_assembly_artifact,
        pipeline_name=pipeline_name,
        # BitBucketSourceAction is the CodeStar-connections source action;
        # in CDK v1 it also covers GitHub connections.
        source_action=cpactions.BitBucketSourceAction(
            action_name='GithubAction',
            output=source_artifact,
            connection_arn=connection_arn,
            owner=github_owner,
            repo=github_repo,
            branch=github_branch),
        synth_action=pipelines.SimpleSynthAction(
            source_artifact=source_artifact,
            cloud_assembly_artifact=cloud_assembly_artifact,
            role_policy_statements=[
                aws_iam.PolicyStatement(
                    actions=["secretsmanager:GetSecretValue"],
                    resources=[secret_arn])
            ],
            install_command=(
                'npm install -g aws-cdk && pip install --upgrade pip && pip install -r requirements.txt'
            ),
            synth_command=(
                f"cdk synth -v -c region={AppContext.region} -c secret_name={AppContext.secret_name}"
            ),
        ))

    pipeline.add_application_stage(DeployStage(self, 'Deploy'))

def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    lambdaFn = lambda_.Function(
        self, "WhoAmIFunction",
        code=lambda_.Code.asset("./code"),
        handler="index.lambda_handler",
        timeout=core.Duration.seconds(300),
        runtime=lambda_.Runtime.PYTHON_3_7,
        initial_policy=[
            iam_.PolicyStatement(
                actions=['sts:GetCallerIdentity'],
                resources=['*']),
        ])

    # Invoke the function every 10 minutes.
    rule = events.Rule(
        self, "Rule",
        schedule=events.Schedule.rate(core.Duration.minutes(10)))
    rule.add_target(targets.LambdaFunction(lambdaFn))

def _build_kms_key_for_env(self) -> None:
    # A place to add other admins if needed for KMS
    administrator_arns: List[str] = []

    admin_principals = iam.CompositePrincipal(
        *[iam.ArnPrincipal(arn) for arn in administrator_arns],
        iam.ArnPrincipal(f"arn:aws:iam::{self.context.account_id}:root"),
    )

    self.env_kms_key: kms.Key = kms.Key(
        self,
        id="kms-key",
        removal_policy=core.RemovalPolicy.RETAIN,
        enabled=True,
        enable_key_rotation=True,
        policy=iam.PolicyDocument(statements=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["kms:*"],
                resources=["*"],
                principals=[admin_principals])
        ]),
    )

def __init__(self, scope: core.Stack, id: str, **kwargs):
    super().__init__(scope, id, **kwargs)

    self.base_platform = BasePlatform(self, self.stack_name)

    self.fargate_task_def = aws_ecs.TaskDefinition(
        self, "TaskDef",
        compatibility=aws_ecs.Compatibility.EC2_AND_FARGATE,
        cpu='256',
        memory_mib='512',
    )

    self.container = self.fargate_task_def.add_container(
        "NodeServiceContainerDef",
        image=aws_ecs.ContainerImage.from_registry("brentley/ecsdemo-nodejs:cdk"),
        memory_reservation_mib=512,
        logging=aws_ecs.LogDriver.aws_logs(stream_prefix='ecsworkshop-nodejs'),
        environment={"REGION": getenv('AWS_DEFAULT_REGION')},
    )
    self.container.add_port_mappings(
        aws_ecs.PortMapping(container_port=3000))

    self.fargate_service = aws_ecs.FargateService(
        self, "NodejsFargateService",
        service_name='ecsdemo-nodejs',
        task_definition=self.fargate_task_def,
        cluster=self.base_platform.ecs_cluster,
        security_group=self.base_platform.services_sec_grp,
        desired_count=1,
        cloud_map_options=aws_ecs.CloudMapOptions(
            cloud_map_namespace=self.base_platform.sd_namespace,
            name='ecsdemo-nodejs'))

    self.fargate_task_def.add_to_task_role_policy(
        aws_iam.PolicyStatement(
            actions=['ec2:DescribeSubnets'],
            resources=['*']))

def __init__(self, scope: core.Construct, id: str, map_params: dict, **kwargs):  # pylint: disable=W0622
    super().__init__(scope, id, **kwargs)
    LOGGER.debug('Notification configuration required for %s', map_params['name'])

    # pylint: disable=no-value-for-parameter
    _slack_func = _lambda.Function.from_function_arn(
        self, 'slack_lambda_function',
        'arn:aws:lambda:{0}:{1}:function:SendSlackNotification'.format(
            ADF_DEPLOYMENT_REGION, ADF_DEPLOYMENT_ACCOUNT_ID))

    _topic = _sns.Topic(self, 'PipelineTopic')
    _statement = _iam.PolicyStatement(
        actions=["sns:Publish"],
        effect=_iam.Effect.ALLOW,
        principals=[
            _iam.ServicePrincipal('sns.amazonaws.com'),
            _iam.ServicePrincipal('codecommit.amazonaws.com'),
            _iam.ServicePrincipal('events.amazonaws.com')
        ],
        resources=["*"])
    _topic.add_to_resource_policy(_statement)

    _lambda.CfnPermission(
        self, 'slack_notification_sns_permissions',
        principal='sns.amazonaws.com',
        action='lambda:InvokeFunction',
        source_arn=_topic.topic_arn,
        function_name='SendSlackNotification')

    # Email endpoints subscribe directly; anything else is routed to the
    # Slack notification Lambda.
    _endpoint = map_params.get('params', {}).get('notification_endpoint', '')
    _sub = _sns.Subscription(
        self, 'sns_subscription',
        topic=_topic,
        endpoint=_endpoint if '@' in _endpoint else _slack_func.function_arn,
        protocol=_sns.SubscriptionProtocol.EMAIL if '@' in _endpoint else _sns.SubscriptionProtocol.LAMBDA)

    if '@' not in _endpoint:
        _slack_func.add_event_source(
            source=_event_sources.SnsEventSource(_topic))

    self.topic_arn = _topic.topic_arn

def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # AMI
    amzn_linux = ec2.MachineImage.latest_amazon_linux(
        generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
        edition=ec2.AmazonLinuxEdition.STANDARD,
        virtualization=ec2.AmazonLinuxVirt.HVM,
        storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE
    )

    # One bastion host per subnet in the BASTION subnet group.
    i = 1
    for subnet in vpc.select_subnets(subnet_group_name="BASTION").subnets:
        bastion_host = ec2.BastionHostLinux(
            self, f"ec2-BASTION-Instance{i}",
            vpc=vpc,
            subnet_selection=ec2.SubnetSelection(
                availability_zones=[subnet.availability_zone],
                subnet_group_name="BASTION"
            ),
            instance_type=ec2.InstanceType("t1.micro"),
            machine_image=amzn_linux
        )
        bastion_host.allow_ssh_access_from(ec2.Peer.any_ipv4())
        i += 1

    host_admin_group = iam.Group(self, "HostAdmins")
    policy = iam.Policy(self, "HostAdminPolicy", groups=[host_admin_group])
    policy.add_statements(iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        actions=["ec2-instance-connect:SendSSHPublicKey"],
        resources=[f"arn:aws:ec2:{self.region}:{self.account}:instance/*"],
        conditions={"StringEquals": {"ec2:osuser": "******"}}
    ))

def create_userpool_client_secret(
    self,
    user_pool: aws_cognito.CfnUserPool,
    user_pool_client: aws_cognito.CfnUserPoolClient,
    tag: str,
) -> custom_resources.AwsCustomResource:
    """
    :return: an AwsCustomResource that provides access to the user pool
        client secret in the response field `user_pool_client_secret`
    """
    resource = custom_resources.AwsCustomResource(
        self,
        f"userpool_client_secret_{tag}",
        resource_type="Custom::UserPoolClientSecret",
        policy=custom_resources.AwsCustomResourcePolicy.from_statements(
            [
                aws_iam.PolicyStatement(
                    effect=aws_iam.Effect.ALLOW,
                    actions=["cognito-idp:DescribeUserPoolClient"],
                    resources=[
                        f"arn:aws:cognito-idp:{self.region}:{self.account}:userpool/{user_pool.ref}"  # noqa: E501
                    ],
                )
            ]
        ),
        on_create=custom_resources.AwsSdkCall(
            physical_resource_id=custom_resources.PhysicalResourceId.of(user_pool_client.ref),
            service="CognitoIdentityServiceProvider",
            action="describeUserPoolClient",
            output_path="UserPoolClient.ClientSecret",
            parameters={"ClientId": user_pool_client.ref, "UserPoolId": user_pool.ref},
        ),
        on_update=custom_resources.AwsSdkCall(
            physical_resource_id=custom_resources.PhysicalResourceId.of(user_pool_client.ref),
            service="CognitoIdentityServiceProvider",
            action="describeUserPoolClient",
            output_path="UserPoolClient.ClientSecret",
            parameters={"ClientId": user_pool_client.ref, "UserPoolId": user_pool.ref},
        ),
    )
    return resource

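# Usage sketch (an assumption, not part of the original source): the secret
# is read back through AwsCustomResource.get_response_field; `user_pool` and
# `user_pool_client` are presumed to exist on the stack.
secret_resource = self.create_userpool_client_secret(
    user_pool, user_pool_client, tag="web")
client_secret = secret_resource.get_response_field("UserPoolClient.ClientSecret")
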
def _create_autoscaling_role(scope: core.Construct, id: str, *, role_name: Optional[str] = None):
    role = iam.Role(
        scope, id,
        role_name=role_name,
        assumed_by=iam.ServicePrincipal('elasticmapreduce.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AmazonElasticMapReduceforAutoScalingRole')
        ])

    # Application Auto Scaling must also be able to assume this role.
    role.assume_role_policy.add_statements(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            principals=[
                iam.ServicePrincipal('application-autoscaling.amazonaws.com')
            ],
            actions=['sts:AssumeRole']
        )
    )
    return role

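# Hypothetical usage (not in the original source): the factory returns an
# iam.Role, so its name can feed an EMR cluster's AutoScalingRole property;
# the construct id and role name below are assumptions.
autoscaling_role = _create_autoscaling_role(
    scope, "AutoScalingRole", role_name="emr-autoscaling-role")
# e.g. pass autoscaling_role.role_name as the auto_scaling_role property of
# an emr.CfnCluster definition.
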
def build_lambda_role(self, name) -> iam.Role:
    return iam.Role(
        self,
        f"{name}-Role",
        assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
        inline_policies={
            "LambdaFunctionServiceRolePolicy": iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    actions=[
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:PutLogEvents",
                    ],
                    resources=[
                        f"arn:{Aws.PARTITION}:logs:{Aws.REGION}:{Aws.ACCOUNT_ID}:log-group:/aws/lambda/*"
                    ],
                )
            ])
        },
    )

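# Hypothetical usage (not in the original source): pair the role with a
# function definition; the module alias `aws_lambda`, the asset path and the
# handler name are assumptions.
worker_fn = aws_lambda.Function(
    self, "Worker",
    runtime=aws_lambda.Runtime.PYTHON_3_8,
    handler="index.lambda_handler",
    code=aws_lambda.Code.from_asset("lambdas/worker"),
    role=self.build_lambda_role("Worker"),
)
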
def eks_cni():
    policy_statement = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        actions=[
            "ec2:AssignPrivateIpAddresses",
            "ec2:AttachNetworkInterface",
            "ec2:CreateNetworkInterface",
            "ec2:DeleteNetworkInterface",
            "ec2:DescribeInstances",
            "ec2:DescribeTags",
            "ec2:DescribeNetworkInterfaces",
            "ec2:DescribeInstanceTypes",
            "ec2:DetachNetworkInterface",
            "ec2:ModifyNetworkInterfaceAttribute",
            "ec2:UnassignPrivateIpAddresses",
            "ec2:CreateTags"
        ],
        resources=['*'],
        conditions={'StringEquals': {"aws:RequestedRegion": "ap-northeast-2"}}
    )
    return policy_statement

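# Hypothetical usage (not in the original source): attach the statement to
# the instance role of an EKS node group; `node_instance_role` is assumed to
# be an iam.Role created elsewhere.
node_instance_role.add_to_policy(eks_cni())
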
def intermediate_user_policy(self, service_role_arn: str):
    """
    The intermediate user policy adds permissions to be able to launch
    clusters via Service Catalog.
    """
    return self.basic_user_policy(service_role_arn) + [
        iam.PolicyStatement(
            sid="AllowClusterTemplatesRelatedIntermediateActions",
            resources=["*"],
            actions=[
                "servicecatalog:DescribeProduct",
                "servicecatalog:DescribeProductView",
                "servicecatalog:ListLaunchPaths",
                "servicecatalog:DescribeProvisioningParameters",
                "servicecatalog:ProvisionProduct",
                "servicecatalog:SearchProducts",
                "servicecatalog:ListProvisioningArtifacts",
                "servicecatalog:DescribeRecord",
                "cloudformation:DescribeStackResources",
            ],
        )
    ]

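# Hypothetical usage (not in the original source): the method returns a list
# of statements, which can seed a customer managed policy; the construct id
# and the service role ARN are assumptions.
iam.ManagedPolicy(
    self, "IntermediateUserPolicy",
    statements=self.intermediate_user_policy(
        "arn:aws:iam::111122223333:role/EMR_DefaultRole"),
)
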
def _connect_pinpoint_to_firehose_delivery_stream(self, *, stack):
    self.pinpoint_firehose_role = iam.Role(
        stack, 'PinPointToFirehoseRole',
        assumed_by=iam.ServicePrincipal('pinpoint.amazonaws.com'))

    self.pinpoint_firehose_role.add_to_policy(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                "firehose:PutRecordBatch",
                "firehose:DescribeDeliveryStream"
            ],
            resources=[self.kfh_instance.attr_arn]))

    self.pinpoint_to_kfh = pinpoint.CfnEventStream(
        stack, 'pinpointclickstreamtokfh',
        application_id=self.pinpoint_instance.ref,
        destination_stream_arn=self.kfh_instance.attr_arn,
        role_arn=self.pinpoint_firehose_role.role_arn)

def provide_access_to_artifacts(scope: core.Construct, *, pipeline_def: Pipeline,
                                artifact_bucket: aws_s3.Bucket) -> None:
    role_arns = set()
    for role_arn in pipeline_def.get("artifact_access", {}).get("role_arns", []):
        role_arns.add(role_arn)

    # Also grant any cross-account action roles used by the pipeline stages.
    for stage_def in pipeline_def["stages"]:
        for action_def in stage_def["actions"]:
            if "role_arn" in action_def:
                account = core.Arn.parse(action_def["role_arn"]).account
                if account != core.Stack.of(scope).account:
                    role_arns.add(action_def["role_arn"])

    for role_arn in role_arns:
        artifact_bucket.add_to_resource_policy(
            aws_iam.PolicyStatement(
                actions=["s3:Get*"],
                resources=[artifact_bucket.arn_for_objects("*")],
                effect=aws_iam.Effect.ALLOW,
                principals=[aws_iam.ArnPrincipal(role_arn)],
            ))

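# Illustrative input (an assumption, inferred from the lookups above): the
# expected pipeline_def shape; the account IDs and role names are dummies,
# and `artifact_bucket` is presumed to exist.
example_pipeline_def = {
    "artifact_access": {"role_arns": ["arn:aws:iam::111111111111:role/reader"]},
    "stages": [
        {"actions": [{"role_arn": "arn:aws:iam::222222222222:role/deployer"}]},
    ],
}
provide_access_to_artifacts(self, pipeline_def=example_pipeline_def,
                            artifact_bucket=artifact_bucket)
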
def create_wic_provider_test_role(self) -> None:
    wic_provider_test_role_condition = {
        "StringEquals": {"graph.facebook.com:app_id": self._facebook_app_id}
    }

    wic_provider_test_role = aws_iam.Role(
        self,
        "wic_provider_test_role",
        assumed_by=aws_iam.FederatedPrincipal(
            "graph.facebook.com",
            wic_provider_test_role_condition,
            "sts:AssumeRoleWithWebIdentity",
        ),
    )
    wic_provider_test_role.add_to_policy(
        aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["translate:TranslateText"],
            resources=["*"]
        )
    )

    self.parameters_to_save["WICProviderTestRoleArn"] = wic_provider_test_role.role_arn

def _create_fn_from_folder(scope, folder_name: str) -> _lambda.Function:
    fn = _lambda.Function(
        scope=scope,
        id=f"lambda-{folder_name}",
        code=_lambda.Code.from_asset(path=os.path.join("lambdas", folder_name)),
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler="index.lambda_handler",
    )
    fn.add_to_role_policy(
        statement=iam.PolicyStatement(
            resources=["*"],
            actions=["codecommit:PostCommentForPullRequest"]
        ),
    )
    return fn

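# Hypothetical usage (not in the original source): each subfolder of
# `lambdas/` becomes one function; the folder name "pr-comments" is made up.
comment_fn = _create_fn_from_folder(self, "pr-comments")
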
def _add_batch_head_node_policies_to_role(self):
    iam.CfnPolicy(
        self,
        "ParallelClusterBatchPoliciesHeadNode",
        policy_name="parallelcluster-awsbatch-head-node",
        policy_document=iam.PolicyDocument(statements=[
            iam.PolicyStatement(
                sid="BatchJobPassRole",
                actions=["iam:PassRole"],
                effect=iam.Effect.ALLOW,
                resources=[
                    self._format_arn(
                        service="iam",
                        region="",
                        resource=f"role{self._cluster_scoped_iam_path()}*",
                    )
                ],
            ),
        ]),
        roles=[self.head_node_instance_role.ref],
    )

def add_deny_for_everyone_except(
    self,
    master_secret: secretsmanager.Secret,
    producer_functions: List[lambda_.Function],
) -> None:
    """
    Sets up the master secret resource policy so that everything *except*
    the given functions is denied access to GetSecretValue.

    Args:
        master_secret: the master secret construct
        producer_functions: a list of functions we are going to set as the
            only allowed accessors
    """
    # This locks down the master secret so that *only* the JWT producer can
    # read values. It is only once this DENY policy is in place that other
    # roles in the same account lose access to the secret value - which is
    # why we must then explicitly allow the roles we do want to access it.
    role_arns: List[str] = []
    for f in producer_functions:
        if not f.role:
            raise Exception(
                f"Rotation function {f.function_name} has somehow not created a Lambda role correctly"
            )
        role_arns.append(f.role.role_arn)

    master_secret.add_to_resource_policy(
        iam.PolicyStatement(
            effect=iam.Effect.DENY,
            actions=["secretsmanager:GetSecretValue"],
            resources=["*"],
            principals=[iam.AccountRootPrincipal()],
            # https://stackoverflow.com/questions/63915906/aws-secrets-manager-resource-policy-to-deny-all-roles-except-one-role
            conditions={
                "ForAllValues:StringNotEquals": {
                    "aws:PrincipalArn": role_arns
                }
            },
        ))

def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    vpc = ec2.Vpc(self, "ecs-load-test", cidr="10.0.0.0/22", max_azs=3)
    cluster = ecs.Cluster(self, "load-test-cluster", vpc=vpc)
    repository = ecr.Repository(self, "spring-boot-helloworld", image_scan_on_push=True)

    role = iam.Role(self, "ecs-allow-cw-role",
                    assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"))
    role.add_to_policy(iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=['*'],
        actions=["cloudwatch:*"]
    ))

    task_definition = ecs.FargateTaskDefinition(
        self, "spring-boot-td",
        task_role=role,
        cpu=512,
        memory_limit_mib=2048)

    image = ecs.ContainerImage.from_ecr_repository(repository, "v24")
    container = task_definition.add_container(
        "spring-boot-container",
        image=image,
        logging=ecs.LogDrivers.aws_logs(stream_prefix="loadtest"))
    port_mapping = ecs.PortMapping(container_port=8080, host_port=8080)
    container.add_port_mappings(port_mapping)

    fargate_service = ecs_patterns.ApplicationLoadBalancedFargateService(
        self, "test-service",
        cluster=cluster,
        task_definition=task_definition,
        desired_count=2,
        cpu=512,
        memory_limit_mib=2048,
        public_load_balancer=True)

    # Drain targets quickly so load-test deployments cycle faster.
    fargate_service.target_group.set_attribute(
        "deregistration_delay.timeout_seconds", "10")

def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # DDB
    table = aws_dynamodb.Table(
        self, "pinpoint_category",
        partition_key=aws_dynamodb.Attribute(
            name="category",
            type=aws_dynamodb.AttributeType.STRING),
        sort_key=aws_dynamodb.Attribute(
            name="event_time",
            type=aws_dynamodb.AttributeType.NUMBER))

    # LAMBDA
    function = aws_lambda.Function(
        self, "pinpoint_send_campaign",
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        handler="pinpoint_campaign_handler.lambda_handler",
        code=aws_lambda.Code.asset("./lambda_fn"),
        environment={'CATEGORY_TABLE_NAME': table.table_name})
    table.grant_read_write_data(function)

    lambdaPinpointSendCampaignRole = function.role
    pinpoint_project_arn = ("arn:aws:mobiletargeting:" + self.region + ":"
                            + self.account + ":apps/"
                            + config.PINPOINT_CONFIG['application_id'])
    pinpointSendCampaignPolicyStatement = aws_iam.PolicyStatement(
        actions=[
            "mobiletargeting:CreateCampaign",
            "mobiletargeting:GetSegments",
            "mobiletargeting:GetSegment"
        ],
        resources=[pinpoint_project_arn],
        effect=aws_iam.Effect.ALLOW)
    lambdaPinpointSendCampaignRole.add_to_policy(
        pinpointSendCampaignPolicyStatement)

    # API Gateway
    api = aws_apigateway.LambdaRestApi(self, "apiSendCampaign", handler=function)

def __init__(self, scope: core.Construct, id: str, cluster: ecs.ICluster,
             repo: ecr.IRepository, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # bucket
    self.xmlBucket = s3.Bucket(
        scope=self,
        id="XmlBucket",
        block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        encryption=s3.BucketEncryption.S3_MANAGED)
    core.CfnOutput(scope=self, id="XmlBucketName", value=self.xmlBucket.bucket_name)

    # service skeleton
    batch_task_definition = ecs.FargateTaskDefinition(
        scope=self,
        id="BatchTaskDef",
        cpu=2048,
        memory_limit_mib=4096,
        volumes=[ecs.Volume(name='storage')])

    batch_container = batch_task_definition.add_container(
        id="BatchContainer",
        image=ecs.ContainerImage.from_ecr_repository(repository=repo, tag='latest'),
        logging=ecs.LogDrivers.aws_logs(stream_prefix="BatchProcessing"),
        environment={'BUCKET': self.xmlBucket.bucket_name})
    batch_container.add_mount_points(
        ecs.MountPoint(container_path='/opt/data',
                       read_only=False,
                       source_volume='storage'))

    batch_task_definition.task_role.add_to_policy(
        statement=iam.PolicyStatement(
            resources=[
                self.xmlBucket.bucket_arn,
                self.xmlBucket.bucket_arn + '/*'
            ],
            actions=['s3:*']))

    ssm.StringParameter(
        scope=self,
        id='SSMParamBatchImageName',
        string_value=batch_container.container_name,
        parameter_name='image_batch')

def __init__(self, scope: core.Stack, id: str, **kwargs):
    super().__init__(scope, id, **kwargs)

    self.base_platform = BasePlatform(self, self.stack_name)

    self.task_image = aws_ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
        image=aws_ecs.ContainerImage.from_registry("adam9098/ecsdemo-capacityproviders:latest"),
        container_port=5000,
        environment={
            'AWS_DEFAULT_REGION': getenv('AWS_DEFAULT_REGION')
        }
    )

    self.load_balanced_service = aws_ecs_patterns.ApplicationLoadBalancedEc2Service(
        self, "EC2CapacityProviderService",
        service_name='ecsdemo-capacityproviders-ec2',
        cluster=self.base_platform.ecs_cluster,
        cpu=256,
        memory_limit_mib=512,
        desired_count=3,
        # desired_count=12,
        public_load_balancer=True,
        task_image_options=self.task_image,
    )

    # This should work, but the default child is not the service cfn;
    # it's a list of the cfn service and security group:
    #   self.cfn_resource = self.load_balanced_service.service.node.default_child
    self.cfn_resource = self.load_balanced_service.service.node.children[0]
    self.cfn_resource.add_deletion_override("Properties.LaunchType")

    self.load_balanced_service.task_definition.add_to_task_role_policy(
        aws_iam.PolicyStatement(
            actions=[
                'ecs:ListTasks',
                'ecs:DescribeTasks'
            ],
            resources=['*']
        )
    )

def __init__(self, scope: core.Construct, id: str, common_stack: CommonStack, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    self._supported_in_region = self.is_service_supported_in_region()

    self.create_bucket(common_stack)

    all_resources_policy = aws_iam.PolicyStatement(
        effect=aws_iam.Effect.ALLOW,
        actions=[
            "polly:DeleteLexicon",
            "polly:GetSpeechSynthesisTask",
            "polly:ListSpeechSynthesisTasks",
            "polly:PutLexicon",
            "polly:StartSpeechSynthesisTask",
            "polly:SynthesizeSpeech",
        ],
        resources=["*"],
    )
    common_stack.add_to_common_role_policies(self, policy_to_add=all_resources_policy)

    self.save_parameters_in_parameter_store(platform=Platform.IOS)

def _create_site_bucket(self):
    """Creates a public S3 bucket for the static site construct"""
    self.bucket = s3.Bucket(
        self,
        "site_bucket",
        bucket_name=self._site_domain_name,
        website_index_document="index.html",
        website_error_document="404.html",
        removal_policy=RemovalPolicy.DESTROY,
        auto_delete_objects=True,
    )

    # Only requests that present the expected Referer header (set as a
    # custom origin header, e.g. by CloudFront) may fetch objects.
    bucket_policy = iam.PolicyStatement(
        actions=["s3:GetObject"],
        resources=[self.bucket.arn_for_objects("*")],
        principals=[iam.AnyPrincipal()],
    )
    bucket_policy.add_condition(
        "StringEquals",
        {"aws:Referer": self.__origin_referer_header},
    )
    self.bucket.add_to_resource_policy(bucket_policy)

def _emr_artifacts_policy() -> iam.PolicyDocument:
    return iam.PolicyDocument(
        statements=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    's3:GetObject*',
                    's3:List*'
                ],
                resources=[
                    'arn:aws:s3:::elasticmapreduce',
                    'arn:aws:s3:::elasticmapreduce/*',
                    'arn:aws:s3:::elasticmapreduce.samples',
                    'arn:aws:s3:::elasticmapreduce.samples/*',
                    'arn:aws:s3:::*.elasticmapreduce',
                    'arn:aws:s3:::*.elasticmapreduce/*',
                    'arn:aws:s3:::*.elasticmapreduce.samples',
                    'arn:aws:s3:::*.elasticmapreduce.samples/*'
                ]
            )
        ]
    )

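# Hypothetical usage (not in the original source): attach the document as an
# inline policy when creating an EMR service role; the construct id, policy
# name and `scope` are assumptions.
emr_role = iam.Role(
    scope, "EmrRole",
    assumed_by=iam.ServicePrincipal("elasticmapreduce.amazonaws.com"),
    inline_policies={"emr-artifacts": _emr_artifacts_policy()},
)
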
def create_service_account(self) -> None:
    """
    Creates the k8s `cluster-autoscaler` service account.

    This _also_ creates a corresponding IAM Role with the policies to manage
    the autoscaling group that has the proper trust relationships defined.
    """
    sa = self.eks_cluster.add_service_account(
        "cluster-autoscaler",
        name="cluster-autoscaler",
        namespace="kube-system")
    sa.add_to_principal_policy(
        iam.PolicyStatement(
            actions=[
                "autoscaling:DescribeAutoScalingGroups",
                "autoscaling:DescribeAutoScalingInstances",
                "autoscaling:DescribeLaunchConfigurations",
                "autoscaling:DescribeTags",
                "autoscaling:SetDesiredCapacity",
                "autoscaling:TerminateInstanceInAutoScalingGroup",
            ],
            resources=["*"],
        ))

def create_project(self, target_function, stage):
    project = codebuild.PipelineProject(
        self, self.create_id("Project", stage),
        project_name=self.create_name(stage),
        environment_variables={
            "FUNCTION_NAME": codebuild.BuildEnvironmentVariable(
                value=target_function.function_name,
                type=codebuild.BuildEnvironmentVariableType.PLAINTEXT),
            "STAGE": codebuild.BuildEnvironmentVariable(
                value=stage,
                type=codebuild.BuildEnvironmentVariableType.PLAINTEXT)
        }
    )
    project.add_to_role_policy(
        iam.PolicyStatement(
            resources=[target_function.function_arn],
            actions=['lambda:UpdateFunctionCode',
                     'lambda:UpdateFunctionConfiguration']
        )
    )
    return project

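# Hypothetical usage (not in the original source): wire the project into a
# CodePipeline build action; the `codepipeline_actions` alias,
# `source_output` artifact and stage name are assumptions.
build_action = codepipeline_actions.CodeBuildAction(
    action_name="UpdateFunction",
    project=self.create_project(target_function, "prod"),
    input=source_output,
)
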
def __init__(self, scope: core.Construct, id: str, common_stack: CommonStack, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    identity_pools = [self.identity_pool(i) for i in range(2)]

    self._supported_in_region = self.is_service_supported_in_region("cognito-identity")

    # Create SSM parameters for the identity pool IDs
    self._parameters_to_save = {
        "identity_pool_id": identity_pools[0].ref,
        "other_identity_pool_id": identity_pools[1].ref,
    }
    self.save_parameters_in_parameter_store(platform=Platform.ANDROID)

    stack_policy = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        actions=["cognito-identity:*"],
        resources=["*"])
    common_stack.add_to_common_role_policies(self, policy_to_add=stack_policy)

def __init__(self, app: core.App, id: str) -> None:
    super().__init__(app, id)

    with open("ssm.json", encoding="utf8") as ssmjson:
        ssmvalue = ssmjson.read()

    param = ssm.StringParameter(
        self, "bunq2ynabParam",
        parameter_name='/lambda/bunq2ynab',
        string_value=ssmvalue
    )

    lambdaFn = lambda_.Function(
        self, "MyCDKapp",
        code=lambda_.Code.asset('./sam-app/.aws-sam/build/bunq2ynab'),
        handler="app.lambda_handler",
        timeout=core.Duration.seconds(300),
        runtime=lambda_.Runtime.PYTHON_3_7,
        environment={
            "LIST_MODE": "0",
            "LOG_LEVEL": "INFO",
            "SSM_PARAM": param.parameter_name
        }
    )

    # Add SSM permissions to the Lambda role.
    polst = iam.PolicyStatement(
        actions=['ssm:GetParameter', 'ssm:PutParameter'],
        resources=[param.parameter_arn]
    )
    lambdaFn.add_to_role_policy(polst)

    # Run every 15 minutes.
    rule = events.Rule(
        self, "Rule",
        schedule=events.Schedule.expression('rate(15 minutes)')
    )
    rule.add_target(targets.LambdaFunction(lambdaFn))

def create_es_domain(self) -> None:
    """
    Create Elasticsearch domain and complete configuration for lambdas
    that use it.
    """
    es_lambdas: List[aws_lambda.Function] = [
        self.lambdas_["create_elastic_index_lambda"],
        self.lambdas_["insert_into_elastic_lambda"],
        self.api_lambdas_["SearchEndpointLambda"],
    ]

    esd = elasticsearch.Domain(
        self,
        id="cbers2stac",
        # This is the version currently used by localstack
        version=elasticsearch.ElasticsearchVersion.V7_7,
        ebs=elasticsearch.EbsOptions(enabled=True,
                                     volume_size=settings.es_volume_size),
        capacity=elasticsearch.CapacityConfig(
            data_node_instance_type=settings.es_instance_type,
            data_nodes=settings.es_data_nodes,
        ),
        access_policies=[
            iam.PolicyStatement(
                actions=["es:*"],
                principals=[
                    lambda_f.grant_principal for lambda_f in es_lambdas
                ],
                # No need to specify resources; the domain is implicit
            )
        ],
    )

    # Add environment for lambdas
    for lambda_f in es_lambdas:
        lambda_f.add_environment("ES_ENDPOINT", esd.domain_endpoint)
        lambda_f.add_environment("ES_PORT", "443")
        lambda_f.add_environment("ES_SSL", "YES")

def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Lambda function that takes in text and returns a Polly voice synthesis
    polly_lambda = _lambda.Function(
        self, 'pollyHandler',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.asset('lambdas'),
        handler='polly.handler')

    # https://docs.aws.amazon.com/polly/latest/dg/api-permissions-reference.html
    # https://docs.aws.amazon.com/translate/latest/dg/translate-api-permissions-ref.html
    polly_policy = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=['*'],
        actions=['translate:TranslateText', 'polly:SynthesizeSpeech'])
    polly_lambda.add_to_role_policy(polly_policy)

    # Defines an API Gateway HTTP API resource backed by our "polly_lambda" function.
    api = api_gw.HttpApi(
        self, 'Polly',
        default_integration=api_gw.LambdaProxyIntegration(handler=polly_lambda))

    core.CfnOutput(self, 'HTTP API Url', value=api.url)