def __init__(self, scope: cdk.Construct, id: str, **kwargs) -> None:
    """Custom resource backed by an inline-code singleton Lambda.

    All extra keyword arguments are forwarded to the custom resource
    as its properties; the handler's "Response" attribute is exposed
    as ``self.response``.
    """
    super().__init__(scope, id)

    # Inline the handler source so no separate deployment asset is needed.
    with open("custom-resource-handler.py", encoding="utf-8") as fp:
        code_body = fp.read()

    resource = cfn.CustomResource(
        self, "Resource",
        provider=cfn.CustomResourceProvider.lambda_(
            lambda_.SingletonFunction(
                self, "Singleton",
                uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
                code=lambda_.InlineCode(code_body),
                handler="index.main",
                # Fix: `timeout` takes a Duration, not a bare int
                # (every sibling construct in this file uses Duration).
                timeout=cdk.Duration.seconds(300),
                # Fix: the runtime enum member is PYTHON_2_7, not PYTHON27.
                runtime=lambda_.Runtime.PYTHON_2_7,
            )),
        properties=kwargs,
    )
    self.response = resource.get_att("Response").to_string()
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Custom resource that generates a random string.

    Extra keyword arguments become the custom resource's properties;
    the handler's "Response" attribute is exposed as ``self.response``.
    """
    super().__init__(scope, id)

    # Ship the handler as inline code read from the repo.
    with open(
        "custom_resource/random_string_generator_lambda_function.py",
        encoding="utf-8",
    ) as fp:
        handler_source = fp.read()

    # Use `uuidgen` in bash to generate new ones
    generator_fn = lambda_.SingletonFunction(
        self, "Singleton",
        uuid="RANDOMF2-F7DB-4561-B7AC-4C9730D10E95",
        code=lambda_.InlineCode(handler_source),
        handler="index.lambda_handler",
        timeout=core.Duration.seconds(300),
        runtime=lambda_.Runtime.PYTHON_3_7,
    )

    resource = cfn.CustomResource(
        self, "Resource",
        provider=cfn.CustomResourceProvider.lambda_(generator_fn),
        properties=kwargs,
    )
    self.response = resource.get_att("Response").to_string()
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Custom resource that loads an index into an Elasticsearch domain.

    Expects ``es_host``, ``es_region`` and ``es_domain_arn`` in kwargs;
    all kwargs are also forwarded as custom-resource properties.
    """
    super().__init__(scope, id)

    es_host = kwargs.get("es_host")
    es_region = kwargs.get("es_region")
    es_domain_arn = kwargs.get("es_domain_arn")

    loader_fn = lambda_.SingletonFunction(
        self, "Singleton",
        uuid="e43d1f1e-5676-415c-84d5-d376069aa0da",
        code=lambda_.Code.asset("./lambda/load-es-index.zip"),
        handler="lambda_function.handler",
        timeout=core.Duration.seconds(300),
        runtime=lambda_.Runtime.PYTHON_3_7,
        environment={'ES_HOST': es_host, 'ES_REGION': es_region})

    # Write access is scoped to the target domain only.
    loader_fn.add_to_role_policy(
        iam.PolicyStatement(
            actions=['es:ESHttpPost', 'es:ESHttpPut'],
            resources=[es_domain_arn],
            effect=iam.Effect.ALLOW))

    resource = cfn.CustomResource(
        self, "Resource",
        provider=cfn.CustomResourceProvider.lambda_(loader_fn),
        properties=kwargs,
    )
    self.response = resource.get_att("Response").to_string()
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Custom resource that subscribes to Amazon Inspector events.

    Extra keyword arguments become the custom resource's properties;
    the handler's "Response" attribute is exposed as ``self.response``.
    """
    super().__init__(scope, id)

    with open("crd_function/crd.py", encoding="utf-8") as fp:
        code_body = fp.read()

    crd_lambda = lambda_.SingletonFunction(
        self, "Singleton",
        # Fix: SingletonFunction's uuid must be a stable constant; a fresh
        # uuid4() on every synth defeats the singleton behavior and causes
        # the function to be replaced on each deployment.
        uuid="6c2a0f3e-8b41-4f0d-9a57-2f9f3f1d8c11",
        code=lambda_.InlineCode(code_body),
        handler="index.lambda_handler",
        timeout=core.Duration.seconds(300),
        runtime=lambda_.Runtime.PYTHON_3_7,
    )
    crd_lambda.add_to_role_policy(
        statement=iam.PolicyStatement(
            actions=["inspector:SubscribeToEvent"],
            resources=["*"]
        )
    )

    resource = cfn.CustomResource(
        self, "Resource",
        provider=cfn.CustomResourceProvider.lambda_(handler=crd_lambda),
        properties=kwargs,
    )
    self.response = resource.get_att("Response").to_string()
def __init__(self, scope: core.Construct, id: str, idp_name: str, idp_url: str, *, cfn_lambda: str = None, cfn_resources_path: str = None, debug=False):
    """Create an IAM SAML Identity Provider via a custom resource.

    Args:
        scope (core.Construct): parent construct.
        id (str): construct id; also drives the SSM parameter name.
        idp_name (str): IAM Idp name
        idp_url (str): Your SAML Identity provider URL
        cfn_lambda (str): optional path to the handler source; defaults to
            the packaged ``lambdas/iam_idp/saml.py`` next to this module.
        cfn_resources_path (str): optional path to the layer zip; defaults
            to the installed-package share directory.
        debug: accepted for API compatibility; not used in this method.
    """
    # Default resource locations live under the installed package prefix.
    rdir = sys.prefix + '/share/aviv-cdk/iam-idp/'
    if not cfn_lambda:
        p = os.path.dirname(os.path.dirname(__file__))
        cfn_lambda = p + '/lambdas/iam_idp/saml.py'
    # Handler is inlined via the project helper CDKLambda._code_inline.
    lambda_attrs = dict(
        code=aws_lambda.InlineCode(CDKLambda._code_inline(cfn_lambda)),
        handler='index.handler',
        timeout=core.Duration.seconds(20),
        runtime=aws_lambda.Runtime.PYTHON_3_7)
    if not cfn_resources_path:
        cfn_resources_path = rdir + 'artifacts-cfn_resources.zip'
    layer_attrs = dict(
        description='cfn_resources layer for idp',
        code=aws_lambda.AssetCode(cfn_resources_path))
    # Superclass builds self._lambda (and its layer) from these attrs.
    super().__init__(scope, id, lambda_attrs=lambda_attrs, layer_attrs=layer_attrs, remote_account_grant=False)
    # Add required policies for the lambda to create an IAM idp
    self._lambda.add_to_role_policy(
        iam.PolicyStatement(
            actions=[
                'iam:CreateSAMLProvider', 'iam:UpdateSAMLProvider',
                'iam:DeleteSAMLProvider'
            ],
            effect=iam.Effect.ALLOW,
            resources=['*']))
    self._idp = cfn.CustomResource(
        self, "identityProvider",
        resource_type='Custom::SAMLProvider',
        provider=cfn.CustomResourceProvider.lambda_(self._lambda),
        properties=dict(Name=idp_name, URL=idp_url))
    self.response = self._idp.get_att("Response").to_string()
    # Export
    # SSM parameter name is derived from the construct id, e.g.
    # "my-idp" -> "/my/idp".
    ssm_name = '/' + id.replace('-', '/')
    ssm.StringParameter(self, 'ssm', string_value=self._idp.ref, parameter_name=ssm_name)
    core.CfnOutput(self, 'IAMIdpSAMLArn', value=self._idp.ref)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Custom resource that loads seed data into a DynamoDB table.

    Expects ``table_name`` (and optionally ``table_arn``) in kwargs;
    all kwargs are forwarded as custom-resource properties.
    """
    super().__init__(scope, id)
    table_name = kwargs.get("table_name")
    table_arn = kwargs.get("table_arn")

    function = lambda_.SingletonFunction(
        self, "Singleton",
        uuid="22fbda4b-ee9f-4317-9489-c118134d8e97",
        code=lambda_.Code.asset("./lambda/load-ddb-data.zip"),
        handler="lambda_function.handler",
        timeout=core.Duration.seconds(300),
        runtime=lambda_.Runtime.PYTHON_3_7,
        environment={'TABLE_NAME': table_name})

    # Fix: `table_arn` was fetched but never used while the policy granted
    # PutItem on "*"; scope the grant to the target table when its ARN is
    # supplied (mirrors the ES construct, which scopes to es_domain_arn).
    function.add_to_role_policy(
        iam.PolicyStatement(
            actions=['dynamodb:PutItem'],
            resources=[table_arn] if table_arn else ["*"],
            effect=iam.Effect.ALLOW))

    resource = cfn.CustomResource(
        self, "Resource",
        provider=cfn.CustomResourceProvider.lambda_(function),
        properties=kwargs,
    )
    self.response = resource.get_att("Response").to_string()
def __init__(self, scope: core.Construct, id: str, props: CustomResourceProps, **kwargs) -> None:
    """Generic custom resource driven by a CustomResourceProps bundle.

    Exposes the created resource as ``self.resource`` and the handler's
    "Response" attribute as ``self.response``.
    """
    super().__init__(scope, id)

    # Deterministic singleton id derived from function name + handler so
    # the same logical function is reused across synths.
    singleton_uuid = str(
        uuid.uuid5(uuid.NAMESPACE_DNS, props.name + props.handler))

    backing_fn = lambda_.SingletonFunction(
        self, "Singleton",
        environment=props.environment,
        function_name=props.name,
        uuid=singleton_uuid,
        code=lambda_.AssetCode(props.lambda_directory),
        handler=props.handler,
        timeout=core.Duration.seconds(props.timeout),
        runtime=props.runtime,
    )

    resource = cfn.CustomResource(
        self, "Resource",
        provider=cfn.CustomResourceProvider.lambda_(backing_fn),
        properties=props.resource_properties,
    )
    self.resource = resource
    self.response = resource.get_att("Response").to_string()
def __init__(self, scope: core.Construct, id: str, Description: str, Uuid: str, **kwargs) -> None:
    """Custom resource whose handler source, policies and identity are
    supplied by the caller.

    Required kwargs: ``HandlerPath`` (path to handler source) and
    ``ResourcePolicies`` (initial IAM policy statements).
    """
    super().__init__(scope, id)

    # The handler source is read from the caller-supplied path and inlined.
    with open(kwargs["HandlerPath"], encoding="utf-8") as fp:
        handler_code = fp.read()

    singleton = lambda_.SingletonFunction(
        self, "Singleton",
        description=Description,
        uuid=Uuid,
        code=lambda_.InlineCode(handler_code),
        handler="index.main",
        timeout=core.Duration.seconds(300),
        runtime=lambda_.Runtime.PYTHON_3_7,
        initial_policy=kwargs["ResourcePolicies"],
        log_retention=logs.RetentionDays.ONE_DAY,
    )

    resource = cfn.CustomResource(
        self, "Resource",
        provider=cfn.CustomResourceProvider.lambda_(singleton),
        properties=kwargs,
    )
    # response
    # NOTE: unlike sibling constructs, the raw attribute reference is
    # exposed here (no .to_string()).
    self.response = resource.get_att("Response")
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id) # Read Lambda Code:) try: with open( "waf_stacks/custom_resources/waf_rate_rule_creator/lambda_src/index.py", encoding="utf-8", mode="r") as f: waf_rate_rule_creator_fn_code = f.read() except OSError: print("Unable to read Lambda Function Code") raise # Create IAM Permission Statements that are required by the Lambda role_stmt1 = _iam.PolicyStatement( effect=_iam.Effect.ALLOW, resources=["*"], actions=["wafv2:GetWebACL", "wafv2:UpdateWebACL"]) role_stmt1.sid = "AllowLambdaToCreateWafRules" waf_rate_rule_creator_fn = _lambda.SingletonFunction( self, "waFRateRuleCreatorSingleton", uuid="mystique30-4ee1-11e8-9c2d-fa7ae01bbebc", code=_lambda.InlineCode(waf_rate_rule_creator_fn_code), handler="index.lambda_handler", timeout=core.Duration.seconds(10), runtime=_lambda.Runtime.PYTHON_3_7, reserved_concurrent_executions=1, environment={ "LOG_LEVEL": "INFO", "APP_ENV": "Production" }, description="Creates a rate based WAF rule") waf_rate_rule_creator_fn.add_to_role_policy(role_stmt1) # Create Custom Log group waf_rate_rule_creator_fn_lg = _logs.LogGroup( self, "wafRateRuleCreatorLogGroup", log_group_name= f"/aws/lambda/{waf_rate_rule_creator_fn.function_name}", retention=_logs.RetentionDays.ONE_WEEK, removal_policy=core.RemovalPolicy.DESTROY) waf_rate_rule_creator = cfn.CustomResource( self, "wafRateRuleCreatorCustomResource", provider=cfn.CustomResourceProvider.lambda_( waf_rate_rule_creator_fn), properties=kwargs, ) self.response = waf_rate_rule_creator.get_att( "rule_add_status").to_string()
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Custom resource that tags/untags IAM users via a singleton Lambda.

    Extra keyword arguments become the custom resource's properties;
    the handler's "Response" attribute is exposed as ``self.response``.
    """
    super().__init__(scope, id)

    with open("custom_resource/iam_user_tagger_lambda_function.py",
              encoding="utf-8") as fp:
        code_body = fp.read()

    statement = iam.PolicyStatement()
    # https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html
    statement.add_actions("iam:TagUser")
    statement.add_actions("iam:UntagUser")
    statement.add_all_resources()
    statement.effect = iam.Effect.ALLOW

    iam_tagger_fn = lambda_.SingletonFunction(
        self, "Singleton",
        uuid="tagger30-4ee1-11e8-9c2d-fa7ae01bbebc",
        code=lambda_.InlineCode(code_body),
        handler="index.lambda_handler",
        timeout=core.Duration.seconds(300),
        runtime=lambda_.Runtime.PYTHON_3_7,
    )
    iam_tagger_fn.add_to_role_policy(statement)

    # Fix: removed a large commented-out duplicate of this CustomResource
    # construction (dead code kept as a bare string literal).
    resource = cfn.CustomResource(
        self, "Resource",
        provider=cfn.CustomResourceProvider.lambda_(iam_tagger_fn),
        properties=kwargs,
    )
    self.response = resource.get_att("Response").to_string()
def __init__(self, scope: core.Construct, id: str, *, ecs_cluster: str, ecs_service: str, production_target_group: str, production_port: int, test_target_group: str, test_port: int, **kwargs):
    """Custom resource that manages a CodeDeploy deployment group for an
    ECS service (blue/green over two target groups/ports)."""
    super().__init__(scope, id, **kwargs)

    # Provider Lambda that creates/deletes the deployment group.
    provider_fn = _lambda_.Function(
        self, "provider_function",
        runtime=_lambda_.Runtime.PYTHON_3_7,
        handler="index.main",
        code=_lambda_.Code.asset("./custom_codedeploy/code/"))
    provider_fn.add_to_role_policy(
        statement=iam.PolicyStatement(
            actions=[
                "codedeploy:CreateDeploymentGroup",
                "codedeploy:GetDeploymentGroup",
                "codedeploy:DeleteDeploymentGroup",
            ],
            resources=["*"],
            effect=iam.Effect.ALLOW))

    # Service role assumed by CodeDeploy itself.
    deploy_role = iam.Role(
        self, "CodeDeployServiceRole",
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AWSCodeDeployRole"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AWSCodeDeployRoleForECS"),
        ],
        assumed_by=iam.ServicePrincipal(
            service="codedeploy.amazonaws.com"))

    cfn.CustomResource(
        self, "DeploymentGroup",
        provider=cfn.CustomResourceProvider.lambda_(provider_fn),
        properties={
            "Service": ecs_service,
            "Cluster": ecs_cluster,
            "Application": production_target_group,
            "TestTargetGroup": test_target_group,
            "ProductionPort": production_port,
            "TestPort": test_port,
            "Role": deploy_role.role_arn,
        })
def __init__(self, scope: core.Construct, id: str, remove_repository_lambda_arn: str, repository_name: str) -> None:
    """Custom resource that removes a repository via an existing Lambda.

    The handler is imported by ARN rather than created here.
    """
    super().__init__(scope, id)

    handler_fn = _lambda.Function.from_function_arn(
        scope=self,
        id='REMOVE-REPOSITORY-LAMBDA',
        function_arn=remove_repository_lambda_arn,
    )

    resource = cfn.CustomResource(
        scope=self,
        id="RESOURCE-REMOVE-REPOSITORY",
        provider=cfn.CustomResourceProvider.from_lambda(handler=handler_fn),
        properties={'RepositoryName': repository_name},
    )
    self.response = resource.get_att("Response").to_string()
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Custom resource that manages VPC routing via a dedicated Lambda.

    Extra keyword arguments become the custom resource's properties;
    the handler's "Response" attribute is exposed as ``self.response``.
    """
    super().__init__(scope, id)

    # Fix: removed a dead file read — the handler source was opened and
    # read into `code_body` but never used, since the function is built
    # from the asset directory below.

    # Role the handler runs under: logging, EC2 route-table management,
    # and a DynamoDB PutItem grant.
    my_lambda_role = iam.Role(
        self, "Role_lambda",
        assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"))
    my_lambda_role.add_to_policy(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=["*"],
            actions=[
                "logs:*", "ec2:DescribeVpcs", "ec2:DescribeInstances",
                "ec2:DescribeInstanceAttribute", "dynamodb:PutItem",
                "ec2:DescribeSubnets", "ec2:DescribeVpcPeeringConnections",
                "ec2:DescribeRouteTables", "ec2:CreateRoute",
                "ec2:ReplaceRouteTableAssociation", "ec2:CreateRouteTable",
                "ec2:DisassociateRouteTable", "ec2:AssociateRouteTable",
                "ec2:DeleteRoute", "ec2:ReplaceRoute",
                "ec2:DeleteRouteTable"
            ]))

    resource = cfn.CustomResource(
        self, "Resource",
        provider=cfn.CustomResourceProvider.lambda_(
            aws_lambda.SingletonFunction(
                self, "Singleton",
                # Fix: the singleton uuid must be a stable constant; a
                # fresh uuid.uuid1() on every synth defeats
                # SingletonFunction and replaces the Lambda each deploy.
                uuid="0b7f1c6a-93d2-4c8e-b1a5-6e2f4a9d7c30",
                code=aws_lambda.Code.from_asset("./cdk_blog_vpc/lambda"),
                handler="lambda_function.lambda_handler",
                timeout=core.Duration.seconds(300),
                runtime=aws_lambda.Runtime.PYTHON_3_7,
                role=my_lambda_role)),
        properties=kwargs,
    )
    self.response = resource.get_att("Response").to_string()
def __init__(self, scope: core.Construct, id: str, config_params, **kwargs) -> None:
    """Custom resource that triggers a downstream run-task Lambda.

    ``config_params`` must supply ``RUN_TASK_FN_ARN``; extra kwargs
    become the custom resource's properties.
    """
    super().__init__(scope, id)

    # Read LambdaFunction Code.
    # Fix: re-raise on failure — previously the error was only printed
    # and execution continued into a NameError on the undefined
    # `code_body` below.
    try:
        with open(
                "load_generator_stacks/custom_resources/trigger_run_task/lambda_src/trigger_run_task_lambda_function.py",
                encoding="utf-8") as fp:
            code_body = fp.read()
    except OSError:
        print('Unable to read UserData script')
        raise

    trigger_run_task_fn = _lambda.SingletonFunction(
        self, "Singleton",
        uuid="mystique2010-4ee1-11e8-9c2d-fa7ae01bbebc",
        code=_lambda.InlineCode(code_body),
        handler="index.lambda_handler",
        timeout=core.Duration.seconds(300),
        runtime=_lambda.Runtime.PYTHON_3_7,
        environment={
            "RUN_TASK_FN_ARN": config_params.get("RUN_TASK_FN_ARN")
        },
        # security_group=config_params.get('RUN_TASK_FN_ARN'),
    )

    # Allow this function to invoke the downstream Lambda.
    roleStmt1 = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        resources=["*"],
        actions=["lambda:InvokeFunction"]
    )
    roleStmt1.sid = "AllowLambdaToInvokeLambda"
    trigger_run_task_fn.add_to_role_policy(roleStmt1)

    resource = cfn.CustomResource(
        self, "Resource",
        provider=cfn.CustomResourceProvider.lambda_(trigger_run_task_fn),
        properties=kwargs,
    )
    self.response = resource.get_att("Response").to_string()
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Cognito stack: SNS role, user pool + app client, a custom resource
    that applies client/domain settings, an identity pool, and stack
    outputs (including the app client secret).
    """
    super().__init__(scope, id, **kwargs)
    # Parameters
    parameters = core.CfnParameter(
        self, "SourceBucket",
        description="Building on AWS Cognito Stack Modified https://github.com/rosberglinhares/CloudFormationCognitoCustomResources",
        default="default"
    )
    LogoutURL = core.CfnParameter(
        self, "LogoutURL",
        type="String",
        default="http://localhost"
    )
    CallbackURL = core.CfnParameter(
        self, "CallbackURL",
        type="String",
        default="http://localhost/callback"
    )
    AppDomain = core.CfnParameter(
        self, "AppDomain",
        type="String",
        default="default"
    )
    # CognitoSNSPolicy
    CognitoSNSPolicy = iam.CfnManagedPolicy(
        self, 'CognitoSNSPolicy',
        description='Managed policy to allow Amazon Cognito to access SNS',
        policy_document={
            "Version": "2012-10-17",
            "Statement": {
                "Effect": "Allow",
                "Action": ["sns:publish"],
                "Resource": "*"
            }
        })
    # SNSRole — assumed by Cognito to send SMS (MFA/verification).
    SNSRole = iam.CfnRole(
        self, "SNSRole",
        role_name="SNSRole",
        managed_policy_arns=[CognitoSNSPolicy.ref],
        assume_role_policy_document={
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": ["sts:AssumeRole"],
                    "Principal": {"Service": ["cognito-idp.amazonaws.com"]}
                }]
        }
    )
    SNSRole.add_depends_on(CognitoSNSPolicy)
    # CognitoUserPool
    CognitoUserPool = cognito.CfnUserPool(
        self, 'UserPool',
        user_pool_name='photos-pool',
        alias_attributes=["email", "phone_number"],
        auto_verified_attributes=["email"],
        email_verification_message="Hi, Your verification code is <br/>{####}\n",
        email_verification_subject="EDX Email Verification",
        mfa_configuration="OPTIONAL",
        policies={
            "passwordPolicy": {
                "minimumLength": 8,
                "requireLowercase": True,
                "requireNumbers": True,
                "requireSymbols": True,
                "requireUppercase": True
            }
        },
        schema=[{
            "attributeDataType": "String",
            "mutable": False,
            "name": "nickname",
            "required": True
        }, {
            "attributeDataType": "String",
            "mutable": False,
            "name": "email",
            "required": True
        }, {
            "attributeDataType": "String",
            "mutable": False,
            "name": "phone_number",
            "required": True
        }],
        sms_configuration={
            "externalId": "%s-external" % (core.Aws.STACK_NAME),
            "snsCallerArn": SNSRole.attr_arn
        }
    )
    # CognitoUserPoolClient
    CognitoUserPoolClient = cognito.CfnUserPoolClient(
        self, "UserPoolClient",
        client_name="WebsiteClient",
        generate_secret=True,
        user_pool_id=CognitoUserPool.ref
    )
    # CognitoCustomResourceRole — execution role for the settings Lambda
    # below: CloudWatch logging, user-pool client/domain management, and
    # Lambda invocation.
    CustomResourceRole = iam.CfnRole(
        self, "CustomResourceRole",
        role_name="cognito-resource-role",
        assume_role_policy_document={
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Principal": {
                        "Service": [
                            "lambda.amazonaws.com"
                        ]
                    },
                    "Action": [
                        "sts:AssumeRole"
                    ]
                }
            ]
        },
        policies=[
            {
                "policyName": "writeCloudWatchLogs",
                "policyDocument": {
                    "Version": "2012-10-17",
                    "Statement": [
                        {
                            "Effect": "Allow",
                            "Action": "logs:CreateLogGroup",
                            "Resource": "arn:aws:logs:*:*:*"
                        },
                        {
                            "Effect": "Allow",
                            "Action": "logs:CreateLogStream",
                            "Resource": "arn:aws:logs:*:*:*"
                        },
                        {
                            "Effect": "Allow",
                            "Action": "logs:PutLogEvents",
                            "Resource": "arn:aws:logs:*:*:*"
                        }
                    ]
                }
            },
            {
                "policyName": "updateUserPoolClient",
                "policyDocument": {
                    "Version": "2012-10-17",
                    "Statement": [
                        {
                            "Effect": "Allow",
                            "Action": "cognito-idp:UpdateUserPoolClient",
                            "Resource": "arn:aws:cognito-idp:*:*:userpool/*"
                        }
                    ]
                }
            },
            {
                "policyName": "manageUserPoolDomain",
                "policyDocument": {
                    "Version": "2012-10-17",
                    "Statement": [
                        {
                            "Effect": "Allow",
                            "Action": "cognito-idp:CreateUserPoolDomain",
                            "Resource": "arn:aws:cognito-idp:*:*:userpool/*"
                        },
                        {
                            "Effect": "Allow",
                            "Action": "cognito-idp:DeleteUserPoolDomain",
                            "Resource": "arn:aws:cognito-idp:*:*:userpool/*"
                        },
                        {
                            "Effect": "Allow",
                            "Action": "cognito-idp:DescribeUserPoolDomain",
                            "Resource": "*"
                        },
                        {
                            "Effect": "Allow",
                            "Action": "cognito-idp:DescribeUserPoolClient",
                            "Resource": "*"
                        }
                    ]
                }
            },
            {
                "policyName": "invokeLambdaFunction",
                "policyDocument": {
                    "Version": "2012-10-17",
                    "Statement": [
                        {
                            "Effect": "Allow",
                            "Action": "lambda:InvokeFunction",
                            "Resource": "arn:aws:lambda:*:*:function:*"
                        }
                    ]
                }
            },
        ]
    )
    # CognitoUserPoolClientClientSettings
    with open("./cdk/CognitoUserPoolClientClientSettings/index.js", encoding="utf-8") as fp:
        code_body = fp.read()
    CognitoUserPoolClientClientSettings = cfn.CustomResource(
        self, "CognitoUserPoolClientClientSettings",
        provider=cfn.CustomResourceProvider.lambda_(
            lambda_.SingletonFunction(
                self, "CognitoUserPoolClientClientSettingsLambda",
                uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
                code=lambda_.InlineCode(code_body),
                handler="index.handler",
                runtime=lambda_.Runtime.NODEJS_8_10,
                role=iam.Role.from_role_arn(
                    self, 'CustomResourceRoleiam',
                    role_arn=CustomResourceRole.attr_arn)
            )
        ),
        properties={
            "UserPoolId": CognitoUserPool.ref,
            "UserPoolClientId": CognitoUserPoolClient.ref,
            "AppDomain": AppDomain.value_as_string,
            "SupportedIdentityProviders": ['COGNITO'],
            "CallbackURL": CallbackURL.value_as_string,
            "LogoutURL": LogoutURL.value_as_string,
            "AllowedOAuthFlowsUserPoolClient": True,
            "AllowedOAuthFlows": ['code'],
            "AllowedOAuthScopes": ['openid']
        },
    )
    # CognitoIdPool
    CognitoIdPool = cognito.CfnIdentityPool(
        self, 'CognitoIdPool',
        identity_pool_name='edxcognitoidpool',
        cognito_identity_providers=[{
            "clientId": CognitoUserPoolClient.ref,
            "providerName": CognitoUserPool.attr_provider_name
        }],
        allow_unauthenticated_identities=False
    )
    # Output
    core.CfnOutput(self, "CognitoUserPoolIdOutput",
                   value=CognitoUserPool.ref,
                   description="The Pool ID of the Cognito User Pool",
                   export_name="CognitoUserPoolId"
                   )
    core.CfnOutput(self, "CognitoUserPoolProviderURLOutput",
                   value=CognitoUserPool.attr_provider_url,
                   description="The Pool ProviderURL of the Cognito User Pool",
                   export_name="CognitoUserPoolProviderURL"
                   )
    core.CfnOutput(self, "CognitoUserPoolArnOutput",
                   value=CognitoUserPool.attr_arn,
                   description="The Pool Arn of the Cognito User Pool",
                   export_name="CognitoUserPoolArn"
                   )
    core.CfnOutput(self, "CognitoUserPoolClientIdOutput",
                   value=CognitoUserPoolClient.ref,
                   description="The App Client ID ",
                   export_name="CognitoUserPoolClientId"
                   )
    # Client secret is surfaced from the custom resource's attribute.
    core.CfnOutput(self, "ClientSecretOutput",
                   value=core.Fn.get_att(
                       "CognitoUserPoolClientClientSettings",
                       "ClientSecret").to_string(),
                   description="The Client Secret ",
                   export_name="ClientSecret"
                   )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Petclinic serverless stack: an API Gateway fronting per-service
    Java Lambdas with DynamoDB tables, a static website bucket, and a
    custom resource that rewrites the site's config after deployment.
    """
    super().__init__(scope, id, **kwargs)
    # The code that defines your stack goes here
    lambda_policies = [
        iam.PolicyStatement(
            actions=[
                "logs:CreateLogStream", "logs:PutLogEvents",
                "logs:CreateLogGroup"
            ],
            effect=iam.Effect.ALLOW,
            resources=[
                "arn:aws:logs:" + core.Aws.REGION + ":" +
                core.Aws.ACCOUNT_ID + ":*"
            ]),
        iam.PolicyStatement(
            actions=["dynamodb:*"],
            effect=iam.Effect.ALLOW,
            resources=[
                "arn:aws:dynamodb:" + core.Aws.REGION + ":" +
                core.Aws.ACCOUNT_ID + ":*"
            ])
    ]
    base_api = _apigw.RestApi(
        self, 'PetclinicApiGatewayWithCors',
        rest_api_name='PetclinicApiGatewayWithCors')
    api_resource = base_api.root.add_resource('api')
    website_bucket = _s3.Bucket(
        self, 'PetclinicWebsite',
        website_index_document='index.html',
        public_read_access=True,
        removal_policy=core.RemovalPolicy.DESTROY)
    deployment = _s3deploy.BucketDeployment(
        self, 'PetclinicDeployWebsite',
        sources=[_s3deploy.Source.asset('./spring-petclinic-static')],
        destination_bucket=website_bucket,
        retain_on_delete=False
        #destination_key_prefix='web/static'
    )
    # Modify the config.js with CF custome resource
    modify_policy = [
        iam.PolicyStatement(
            actions=[
                "s3:PutObject", "s3:PutObjectAcl",
                "s3:PutObjectVersionAcl", "s3:GetObject"
            ],
            effect=iam.Effect.ALLOW,
            resources=[website_bucket.bucket_arn + "/*"]),
        iam.PolicyStatement(
            actions=["s3:ListBucket"],
            effect=iam.Effect.ALLOW,
            resources=[website_bucket.bucket_arn]),
        iam.PolicyStatement(
            actions=["dynamodb:*"],
            effect=iam.Effect.ALLOW,
            resources=[
                "arn:aws:dynamodb:" + core.Aws.REGION + ":" +
                core.Aws.ACCOUNT_ID + ":*"
            ])
    ]
    with open("custom-resource-code/init.py", encoding="utf-8") as fp:
        code_body = fp.read()
    # One table + Lambda + API subtree per petclinic micro-service.
    dynamodb_tables = []
    for service in ['customer', 'vet', 'visit']:
        table = _dynamodb.Table(
            self, service.capitalize() + 'Table',
            partition_key={
                'name': 'id',
                'type': _dynamodb.AttributeType.STRING
            },
            removal_policy=core.RemovalPolicy.DESTROY,
            read_capacity=5,
            write_capacity=5,
        )
        dynamodb_tables.append(table.table_name)
        base_lambda = _lambda.Function(
            self, 'ApiPetclinic' + service.capitalize() + 'Lambda',
            handler='org.springframework.samples.petclinic.' + service +
            's.StreamLambdaHandler::handleRequest',
            runtime=_lambda.Runtime.JAVA_8,
            code=_lambda.Code.asset(
                './spring-petclinic-serverless/spring-petclinic-' +
                service + 's-serverless/target/spring-petclinic-' +
                service + 's-serverless-2.0.7.jar'),
            memory_size=1024,
            timeout=core.Duration.seconds(300),
            initial_policy=lambda_policies,
            environment={
                "DYNAMODB_TABLE_NAME": table.table_name,
                "SERVER_SERVLET_CONTEXT_PATH": "/api/" + service
            })
        base_version = base_lambda.add_version(
            name='v1', provisioned_executions=1)  #Added for warm the Java Lambda
        entity = api_resource.add_resource(service)
        entity.add_proxy(
            default_integration=_apigw.LambdaIntegration(base_version))
        # CORS support comes from a sibling method on this stack class.
        self.add_cors_options(entity)
    # Custom resource that writes the API URL and table names into the
    # deployed static site's configuration in S3.
    resource = _cfn.CustomResource(
        self, "S3ModifyCustomResource",
        provider=_cfn.CustomResourceProvider.lambda_(
            _lambda.SingletonFunction(
                self, "CustomResourceSingleton",
                uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
                code=_lambda.InlineCode(code_body),
                handler="index.handler",
                timeout=core.Duration.seconds(300),
                runtime=_lambda.Runtime.PYTHON_3_7,
                initial_policy=modify_policy)),
        properties={
            "Bucket": website_bucket.bucket_name,
            "InvokeUrl": base_api.url,
            "DynamoDBTables": dynamodb_tables
        })
    core.CfnOutput(
        self, "PetclinicWebsiteUrl",
        export_name="PetclinicWebsiteUrl",
        value=website_bucket.bucket_website_url)
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    """Ghost edge stack: a custom resource resolving the ASG host IP,
    Route53 records, an ACM certificate, and a CloudFront distribution.

    Required props: CfOriginDomainName, Asg, HostedZoneName, WebsiteDns;
    optional: ExistingHostedZoneId.
    """
    super().__init__(scope, id, **kwargs)

    # Validated require props.
    required_props_keys = ['CfOriginDomainName', 'Asg', 'HostedZoneName', 'WebsiteDns']
    for k in required_props_keys:
        if k not in props or not props[k]:
            raise ValueError("Required prop %s is not present" % k)

    # Create a custom resource that returns the IP of the host behind
    # the autoscaling group
    asg = props['Asg']
    asg_ip_handler = lambda_.Function(
        self, 'GhostIpHandler',
        runtime=lambda_.Runtime.PYTHON_3_6,
        code=lambda_.Code.asset('lambda'),
        handler='ghost_ip.handler',
    )
    asg_ip_handler.add_to_role_policy(
        statement=iam.PolicyStatement(
            actions=['autoscaling:DescribeAutoScalingGroups', 'ec2:DescribeInstances'],
            # Fix: the resources list contained a duplicated '*' entry.
            resources=['*'],
        )
    )
    asg_ip_provider = cr.Provider(
        self, 'GhostIpProvider',
        on_event_handler=asg_ip_handler,
    )
    asg_ip_resource = cfn.CustomResource(
        self, 'GhostIpResource',
        provider=asg_ip_provider,
        properties={
            'AsgName': asg.auto_scaling_group_name,
            'ts': time.time(),  # this makes sure the function is invoked for every CFN update
        }
    )

    # Create R53 HZ and cf origin domain
    if 'ExistingHostedZoneId' in props and props['ExistingHostedZoneId']:
        hz = route53.HostedZone.from_hosted_zone_attributes(
            self, 'HostedZone',
            zone_name=props['HostedZoneName'],
            hosted_zone_id=props['ExistingHostedZoneId'],
        )
    else:
        hz = route53.HostedZone(
            self, 'HostedZone',
            zone_name=props['HostedZoneName']
        )
    origin_rrset = route53.ARecord(
        self, 'OriginRecord',
        target=route53.RecordTarget.from_ip_addresses(
            asg_ip_resource.get_att_string('GhostIp')),
        record_name=props['CfOriginDomainName'],
        zone=hz,
    )

    # Create a CF distro
    acm_cert = acm.DnsValidatedCertificate(
        self, 'GhostAcmCert',
        hosted_zone=hz,
        domain_name=props['WebsiteDns'],
        region='us-east-1',
    )
    cf_distro = cf.CloudFrontWebDistribution(
        self, 'CfDistro',
        origin_configs=[cf.SourceConfiguration(
            custom_origin_source=cf.CustomOriginConfig(
                domain_name=props['CfOriginDomainName'],
                origin_protocol_policy=cf.OriginProtocolPolicy.HTTP_ONLY,
            ),
            behaviors=[cf.Behavior(is_default_behavior=True)],
        )],
        alias_configuration=cf.AliasConfiguration(
            names=[props['WebsiteDns']],
            acm_cert_ref=acm_cert.certificate_arn,
        ),
        default_root_object='',
    )

    # Create the top level website DNS pointing to the CF distro
    ghost_rrset = route53.CnameRecord(
        self, 'GhostDns',
        domain_name=cf_distro.domain_name,
        zone=hz,
        record_name=props['WebsiteDns'],
    )
def __init__(self, scope: core.Construct, id: str, config_params, **kwargs) -> None:
    """Custom resource that ingests seed data into Redis from inside a VPC.

    ``config_params`` supplies the bucket, Redis endpoint, security group,
    VPC and record count; extra kwargs become the resource's properties.
    """
    super().__init__(scope, id)

    # Lambda Layer for Redis
    redis_lib_layer = _lambda.LayerVersion(
        self, "redisPythonLibLayer",
        code=_lambda.Code.from_asset(
            "custom_resources/redis_data_ingester/lambda_src/layer_code/redis_lib_python37.zip"
        ),
        compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
        license="This product uses redis code from https://pypi.org/project/redis/ library",
        description="Layer to connect to redis using python")

    # Read LambdaFunction Code.
    # Fix: re-raise on failure — previously the error was only printed and
    # execution continued into a NameError on the undefined `code_body`.
    try:
        with open(
                "custom_resources/redis_data_ingester/lambda_src/redis_data_ingester_lambda_function.py",
                encoding="utf-8") as fp:
            code_body = fp.read()
    except OSError:
        print('Unable to read UserData script')
        raise

    # Create IAM Permission Statements that are required by the Lambda
    _instance_role = _iam.Role(
        self, "webAppClientRoleId",
        assumed_by=_iam.ServicePrincipal('ec2.amazonaws.com'),
        managed_policies=[
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonSSMManagedInstanceCore'),
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonS3ReadOnlyAccess')
        ])

    # ENI management is required for a Lambda attached to a VPC.
    roleStmt1 = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        resources=['*'],
        actions=[
            'ec2:CreateNetworkInterface',
            'ec2:DescribeNetworkInterfaces',
            'ec2:DeleteNetworkInterface'
        ])
    roleStmt1.sid = "AllowLambdaToManageVPCENI"

    roleStmt2 = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        resources=[f"{config_params.get('BUCKET').bucket_arn}/*"],
        actions=['s3:GetObject', 's3:PutObject'])
    roleStmt2.sid = "AllowS3ObjectReadWriteAccess"

    redis_data_ingester_fn = _lambda.SingletonFunction(
        self, "Singleton",
        uuid="mystique30-4ee1-11e8-9c2d-fa7ae01bbebc",
        code=_lambda.InlineCode(code_body),
        handler="index.lambda_handler",
        timeout=core.Duration.seconds(300),
        runtime=_lambda.Runtime.PYTHON_3_7,
        environment={
            'LD_LIBRARY_PATH': '/opt/python',
            'REDIS_HOST': config_params.get('REDIS_HOST'),
            'REDIS_PORT': config_params.get('REDIS_PORT'),
            'BUCKET_NAME': config_params.get('BUCKET_NAME'),
            'RECORD_COUNT': config_params.get('RECORD_COUNT')
        },
        layers=[redis_lib_layer],
        security_group=config_params.get('REDIS_SG'),
        vpc=config_params.get('VPC'),
        vpc_subnets=_ec2.SubnetType.PRIVATE)
    redis_data_ingester_fn.add_to_role_policy(roleStmt1)
    redis_data_ingester_fn.add_to_role_policy(roleStmt2)

    resource = cfn.CustomResource(
        self, "Resource",
        provider=cfn.CustomResourceProvider.lambda_(
            redis_data_ingester_fn),
        properties=kwargs,
    )
    self.response = resource.get_att("Response").to_string()
def __init__(self, scope: cdk.Construct, id: str, **kwargs) -> None:
    """Workshop stack: VPC, data bucket, bootstrap assets, CloudTrail, a
    Cloud9 IDE, and the custom resources (keypair, instance profile,
    bootstrap) that prepare the IDE for AWS ParallelCluster.
    """
    super().__init__(scope, id, **kwargs)

    ### Parameters
    bootstrap_script_args = cdk.CfnParameter(
        self, 'BootstrapScriptArgs',
        type='String',
        default='',
        description='Space seperated arguments passed to the bootstrap script.'
    )

    # create a VPC
    vpc = ec2.Vpc(self, 'VPC', cidr='10.0.0.0/16', max_azs=99)

    # create a private and public subnet per vpc
    selection = vpc.select_subnets(
        subnet_type=ec2.SubnetType.PRIVATE
    )

    # Output created subnets
    for i, public_subnet in enumerate(vpc.public_subnets):
        cdk.CfnOutput(self, 'PublicSubnet%i' % i,
                      value=public_subnet.subnet_id)
    for i, private_subnet in enumerate(vpc.private_subnets):
        cdk.CfnOutput(self, 'PrivateSubnet%i' % i,
                      value=private_subnet.subnet_id)
    cdk.CfnOutput(self, 'VPCId', value=vpc.vpc_id)

    # Create a Bucket
    bucket = s3.Bucket(self, "DataRepository")
    quickstart_bucket = s3.Bucket.from_bucket_name(
        self, 'QuickStartBucket', 'aws-quickstart')

    # Upload Bootstrap Script to that bucket
    bootstrap_script = assets.Asset(self, 'BootstrapScript',
                                    path='scripts/bootstrap.sh')

    # Upload parallel cluster post_install_script to that bucket
    pcluster_post_install_script = assets.Asset(
        self, 'PclusterPostInstallScript',
        path='scripts/post_install_script.sh'
    )

    # Setup CloudTrail
    cloudtrail.Trail(self, 'CloudTrail', bucket=bucket)

    # Create a Cloud9 instance
    # Cloud9 doesn't have the ability to provide userdata
    # Because of this we need to use SSM run command
    cloud9_instance = cloud9.Ec2Environment(
        self, 'Cloud9Env',
        vpc=vpc,
        instance_type=ec2.InstanceType(instance_type_identifier='c5.large'))
    cdk.CfnOutput(self, 'URL', value=cloud9_instance.ide_url)

    # Create a keypair in lambda and store the private key in SecretsManager
    c9_createkeypair_role = iam.Role(
        self, 'Cloud9CreateKeypairRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
    c9_createkeypair_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'service-role/AWSLambdaBasicExecutionRole'))
    # Add IAM permissions to the lambda role
    c9_createkeypair_role.add_to_policy(iam.PolicyStatement(
        actions=[
            'ec2:CreateKeyPair',
            'ec2:DeleteKeyPair'
        ],
        resources=['*'],
    ))

    # Lambda for Cloud9 keypair
    c9_createkeypair_lambda = _lambda.Function(
        self, 'C9CreateKeyPairLambda',
        runtime=_lambda.Runtime.PYTHON_3_6,
        handler='lambda_function.handler',
        timeout=cdk.Duration.seconds(300),
        role=c9_createkeypair_role,
        code=_lambda.Code.asset('functions/source/c9keypair'),
        # code=_lambda.Code.from_bucket(
    )

    c9_createkeypair_provider = cr.Provider(
        self, "C9CreateKeyPairProvider",
        on_event_handler=c9_createkeypair_lambda)

    c9_createkeypair_cr = cfn.CustomResource(
        self, "C9CreateKeyPair",
        provider=c9_createkeypair_provider,
        properties={
            'ServiceToken': c9_createkeypair_lambda.function_arn
        }
    )
    #c9_createkeypair_cr.node.add_dependency(instance_id)
    c9_ssh_private_key_secret = secretsmanager.CfnSecret(
        self, 'SshPrivateKeySecret',
        secret_string=c9_createkeypair_cr.get_att_string('PrivateKey')
    )

    # The iam policy has a <REGION> parameter that needs to be replaced.
    # We do it programmatically so future versions of the synth'd stack
    # template include all regions.
    with open('iam/ParallelClusterUserPolicy.json') as json_file:
        data = json.load(json_file)
    for s in data['Statement']:
        if s['Sid'] == 'S3ParallelClusterReadOnly':
            s['Resource'] = []
            for r in region_info.RegionInfo.regions:
                s['Resource'].append(
                    'arn:aws:s3:::{0}-aws-parallelcluster*'.format(r.name))

    parallelcluster_user_policy = iam.CfnManagedPolicy(
        self, 'ParallelClusterUserPolicy',
        policy_document=iam.PolicyDocument.from_json(data))

    # Cloud9 IAM Role
    cloud9_role = iam.Role(
        self, 'Cloud9Role',
        assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'))
    cloud9_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AmazonSSMManagedInstanceCore'))
    cloud9_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name('AWSCloud9User'))
    cloud9_role.add_managed_policy(
        iam.ManagedPolicy.from_managed_policy_arn(
            self, 'AttachParallelClusterUserPolicy',
            parallelcluster_user_policy.ref))
    cloud9_role.add_to_policy(iam.PolicyStatement(
        resources=['*'],
        actions=[
            'ec2:DescribeInstances',
            'ec2:DescribeVolumes',
            'ec2:ModifyVolume'
        ]
    ))
    cloud9_role.add_to_policy(iam.PolicyStatement(
        resources=[c9_ssh_private_key_secret.ref],
        actions=[
            'secretsmanager:GetSecretValue'
        ]
    ))

    bootstrap_script.grant_read(cloud9_role)
    pcluster_post_install_script.grant_read(cloud9_role)

    # Cloud9 User
    # user = iam.User(self, 'Cloud9User', password=cdk.SecretValue.plain_text('supersecretpassword'), password_reset_required=True)

    # Cloud9 Setup IAM Role
    cloud9_setup_role = iam.Role(
        self, 'Cloud9SetupRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
    cloud9_setup_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'service-role/AWSLambdaBasicExecutionRole'))
    # Allow pcluster to be run in bootstrap
    cloud9_setup_role.add_managed_policy(
        iam.ManagedPolicy.from_managed_policy_arn(
            self, 'AttachParallelClusterUserPolicySetup',
            parallelcluster_user_policy.ref))

    # Add IAM permissions to the lambda role
    cloud9_setup_role.add_to_policy(iam.PolicyStatement(
        actions=[
            'cloudformation:DescribeStackResources',
            'ec2:AssociateIamInstanceProfile',
            'ec2:AuthorizeSecurityGroupIngress',
            'ec2:DescribeInstances',
            'ec2:DescribeInstanceStatus',
            'ec2:DescribeInstanceAttribute',
            'ec2:DescribeIamInstanceProfileAssociations',
            'ec2:DescribeVolumes',
            # Fix: action was misspelled 'ec2:DesctibeVolumeAttribute',
            # which is not a valid EC2 action and granted nothing.
            'ec2:DescribeVolumeAttribute',
            'ec2:DescribeVolumesModifications',
            'ec2:DescribeVolumeStatus',
            'ssm:DescribeInstanceInformation',
            'ec2:ModifyVolume',
            'ec2:ReplaceIamInstanceProfileAssociation',
            'ec2:ReportInstanceStatus',
            'ssm:SendCommand',
            'ssm:GetCommandInvocation',
            's3:GetObject',
            'lambda:AddPermission',
            'lambda:RemovePermission',
            'events:PutRule',
            'events:DeleteRule',
            'events:PutTargets',
            'events:RemoveTargets',
        ],
        resources=['*'],
    ))
    cloud9_setup_role.add_to_policy(iam.PolicyStatement(
        actions=['iam:PassRole'],
        resources=[cloud9_role.role_arn]
    ))
    cloud9_setup_role.add_to_policy(iam.PolicyStatement(
        actions=[
            'lambda:AddPermission',
            'lambda:RemovePermission'
        ],
        resources=['*']
    ))

    # Cloud9 Instance Profile
    c9_instance_profile = iam.CfnInstanceProfile(
        self, "Cloud9InstanceProfile",
        roles=[cloud9_role.role_name])

    # Lambda to add Instance Profile to Cloud9
    c9_instance_profile_lambda = _lambda.Function(
        self, 'C9InstanceProfileLambda',
        runtime=_lambda.Runtime.PYTHON_3_6,
        handler='lambda_function.handler',
        timeout=cdk.Duration.seconds(900),
        role=cloud9_setup_role,
        code=_lambda.Code.asset('functions/source/c9InstanceProfile'),
    )

    c9_instance_profile_provider = cr.Provider(
        self, "C9InstanceProfileProvider",
        on_event_handler=c9_instance_profile_lambda,
    )

    instance_id = cfn.CustomResource(
        self, "C9InstanceProfile",
        provider=c9_instance_profile_provider,
        properties={
            'InstanceProfile': c9_instance_profile.ref,
            'Cloud9Environment': cloud9_instance.environment_id,
        }
    )
    instance_id.node.add_dependency(cloud9_instance)

    # Lambda for Cloud9 Bootstrap
    c9_bootstrap_lambda = _lambda.Function(
        self, 'C9BootstrapLambda',
        runtime=_lambda.Runtime.PYTHON_3_6,
        handler='lambda_function.handler',
        timeout=cdk.Duration.seconds(900),
        role=cloud9_setup_role,
        code=_lambda.Code.asset('functions/source/c9bootstrap'),
    )

    c9_bootstrap_provider = cr.Provider(
        self, "C9BootstrapProvider",
        on_event_handler=c9_bootstrap_lambda)

    c9_bootstrap_cr = cfn.CustomResource(
        self, "C9Bootstrap",
        provider=c9_bootstrap_provider,
        properties={
            'Cloud9Environment': cloud9_instance.environment_id,
            'BootstrapPath': 's3://%s/%s' % (
                bootstrap_script.s3_bucket_name,
                bootstrap_script.s3_object_key),
            # Fix: pass the parameter's string token, not the CfnParameter
            # construct object, so the property serializes correctly.
            'BootstrapArguments': bootstrap_script_args.value_as_string,
            'VPCID': vpc.vpc_id,
            'MasterSubnetID': vpc.public_subnets[0].subnet_id,
            'ComputeSubnetID': vpc.private_subnets[0].subnet_id,
            'PostInstallScriptS3Url': "".join(
                ['s3://', pcluster_post_install_script.s3_bucket_name,
                 "/", pcluster_post_install_script.s3_object_key]),
            'PostInstallScriptBucket': pcluster_post_install_script.s3_bucket_name,
            'KeyPairId': c9_createkeypair_cr.ref,
            'KeyPairSecretArn': c9_ssh_private_key_secret.ref
        }
    )
    c9_bootstrap_cr.node.add_dependency(instance_id)
    c9_bootstrap_cr.node.add_dependency(c9_createkeypair_cr)
    c9_bootstrap_cr.node.add_dependency(c9_ssh_private_key_secret)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Custom resource that retrieves the Cognito app client secret.

    A singleton Lambda reads the user-pool app-client configuration and
    manages a Secrets Manager secret for it; the resulting secret ARN is
    exposed through ``self.response``.

    :param scope: parent construct.
    :param id: construct id.
    :param kwargs: forwarded verbatim as the custom resource properties.
    """
    super().__init__(scope, id)

    # Load the handler source so it can be inlined into the Lambda.
    try:
        with open(
            "cognito_identity_provider/custom_resources/cognito_app_client_secret_retriever/lambda_src/index.py",
            encoding="utf-8",
            mode="r",
        ) as f:
            handler_source = f.read()
    except OSError:
        print("Unable to read Lambda Function Code")
        raise

    # Least-privilege statements the handler needs.
    describe_pool_stmt = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        resources=["*"],
        actions=["cognito-idp:DescribeUserPoolClient"],
    )
    describe_pool_stmt.sid = "AllowLambdaToDescribeCognitoUserPool"

    manage_secret_stmt = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        resources=["*"],
        actions=[
            "secretsmanager:CreateSecret",
            "secretsmanager:TagResource",
            "secretsmanager:UpdateSecret",
            "secretsmanager:DeleteSecret",
        ],
    )
    manage_secret_stmt.sid = "AllowLambdaToAddSecrets"

    secret_retriever_fn = _lambda.SingletonFunction(
        self,
        "Singleton",
        uuid="mystique30-4ee1-11e8-9c2d-fa7ae01bbebc",
        code=_lambda.InlineCode(handler_source),
        handler="index.lambda_handler",
        timeout=core.Duration.seconds(10),
        runtime=_lambda.Runtime.PYTHON_3_7,
        reserved_concurrent_executions=1,
        environment={
            "LOG_LEVEL": "INFO",
            "APP_ENV": "Production",
        },
    )
    secret_retriever_fn.add_to_role_policy(describe_pool_stmt)
    secret_retriever_fn.add_to_role_policy(manage_secret_stmt)

    # Dedicated log group with short retention; destroyed with the stack.
    _logs.LogGroup(
        self,
        "cognitoAppClientSecretRetriever",
        log_group_name=f"/aws/lambda/{secret_retriever_fn.function_name}",
        retention=_logs.RetentionDays.ONE_WEEK,
        removal_policy=core.RemovalPolicy.DESTROY,
    )

    user_pool_secrets_creator = cfn.CustomResource(
        self,
        "Resource",
        provider=cfn.CustomResourceProvider.lambda_(secret_retriever_fn),
        properties=kwargs,
    )
    # Secret ARN reported back by the custom-resource handler.
    self.response = user_pool_secrets_creator.get_att(
        "user_pool_secrets_arn").to_string()
def __init__(
        self,
        scope: core.Construct,
        id: str,
        ssh_key_name="mystique-automation-ssh-key",
        **kwargs
) -> None:
    """Custom resource that creates an EC2 SSH key pair.

    A singleton Lambda creates a key pair named ``ssh_key_name`` and
    stores the private key in an SSM parameter (per its IAM statements
    below); the creation status is exposed through ``self.response``.

    :param scope: parent construct.
    :param id: construct id.
    :param ssh_key_name: name for the generated EC2 key pair.
    :param kwargs: forwarded verbatim as the custom resource properties.
    """
    super().__init__(scope, id)

    # Load the handler source so it can be inlined into the Lambda.
    try:
        with open("custom_resources/ssh_key_generator/lambda_src/index.py",
                  encoding="utf-8",
                  mode="r") as f:
            ssh_key_generator_fn_code = f.read()
    except OSError:
        print("Unable to read Lambda Function Code")
        raise

    # Least-privilege statements: key-pair lifecycle + SSM parameter storage.
    role_stmt1 = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        resources=["*"],
        actions=[
            "ec2:CreateKeyPair",
            "ec2:DeleteKeyPair"
        ])
    role_stmt1.sid = "AllowLambdaToCreateSshKey"

    role_stmt2 = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        resources=["*"],
        actions=[
            "ssm:PutParameter",
            "ssm:DeleteParameter",
            "ssm:GetParameter"
        ])
    role_stmt2.sid = "AllowLambdaToCreateSSMParameter"

    ssh_key_generator_fn = _lambda.SingletonFunction(
        self,
        "sshKeyGeneratorSingleton",
        # FIX: was "mystique30-4ee1-11e8-9c2d-fa7ae01bbebc" — the same uuid
        # as the Cognito app-client-secret singleton. SingletonFunction
        # dedupes by uuid, so both constructs in one stack would share a
        # single Lambda and one handler's code would silently be dropped.
        uuid="mystique32-4ee1-11e8-9c2d-fa7ae01bbebc",
        code=_lambda.InlineCode(ssh_key_generator_fn_code),
        handler="index.lambda_handler",
        timeout=core.Duration.seconds(10),
        runtime=_lambda.Runtime.PYTHON_3_7,
        reserved_concurrent_executions=1,
        environment={
            "LOG_LEVEL": "INFO",
            "APP_ENV": "Production",
            "SSH_KEY_NAME": ssh_key_name
        },
        description="Creates a SSH Key in the region")
    ssh_key_generator_fn.add_to_role_policy(role_stmt1)
    ssh_key_generator_fn.add_to_role_policy(role_stmt2)

    # Short-retention log group, destroyed with the stack.
    _logs.LogGroup(
        self,
        "sshKeyGeneratorLogGroup",
        log_group_name=f"/aws/lambda/{ssh_key_generator_fn.function_name}",
        retention=_logs.RetentionDays.ONE_WEEK,
        removal_policy=core.RemovalPolicy.DESTROY)

    ssh_key_generator = cfn.CustomResource(
        self,
        "sshKeyGeneratorCustomResource",
        provider=cfn.CustomResourceProvider.lambda_(ssh_key_generator_fn),
        properties=kwargs,
    )
    # Status string reported back by the custom-resource handler.
    self.response = ssh_key_generator.get_att(
        "ssh_key_gen_status").to_string()
def __init__(self, scope: cdk.Construct, id: str, **kwargs) -> None:
    """Provision the HPC quickstart stack.

    Creates the VPC, data/CloudTrail buckets, a Cloud9 research
    workspace, ParallelCluster IAM plumbing, the keypair / instance
    profile / bootstrap custom resources, an optional IAM user and an
    optional annual cost budget.

    Fixes vs. previous revision:
    - 'ec2:DesctibeVolumeAttribute' was a misspelled (nonexistent) IAM
      action, so DescribeVolumeAttribute was never actually granted.
    - The 'Config' custom-resource property passed the CfnParameter
      construct itself instead of its resolved string value.
    """
    super().__init__(scope, id, **kwargs)

    # Version of ParallelCluster for Cloud9.
    pcluster_version = cdk.CfnParameter(
        self,
        'ParallelClusterVersion',
        description=
        'Specify a custom parallelcluster version. See https://pypi.org/project/aws-parallelcluster/#history for options.',
        default='2.8.0',
        type='String',
        allowed_values=get_version_list('aws-parallelcluster'))

    # S3 URI for Config file
    config = cdk.CfnParameter(
        self,
        'ConfigS3URI',
        description='Set a custom parallelcluster config file.',
        default=
        'https://notearshpc-quickstart.s3.amazonaws.com/{0}/config.ini'.
        format(__version__))

    # Password for the optional hpc-quickstart IAM user (no_echo hides it).
    password = cdk.CfnParameter(
        self,
        'UserPasswordParameter',
        description='Set a password for the hpc-quickstart user',
        no_echo=True)

    # VPC with S3/DynamoDB gateway endpoints; max_azs=99 means "all AZs".
    vpc = ec2.Vpc(
        self,
        'VPC',
        cidr='10.0.0.0/16',
        gateway_endpoints={
            "S3":
            ec2.GatewayVpcEndpointOptions(
                service=ec2.GatewayVpcEndpointAwsService.S3),
            "DynamoDB":
            ec2.GatewayVpcEndpointOptions(
                service=ec2.GatewayVpcEndpointAwsService.DYNAMODB)
        },
        max_azs=99)

    # create a private and public subnet per vpc selection
    selection = vpc.select_subnets(subnet_type=ec2.SubnetType.PRIVATE)

    # Output created subnets
    for i, public_subnet in enumerate(vpc.public_subnets):
        cdk.CfnOutput(self,
                      'PublicSubnet%i' % i,
                      value=public_subnet.subnet_id)
    for i, private_subnet in enumerate(vpc.private_subnets):
        cdk.CfnOutput(self,
                      'PrivateSubnet%i' % i,
                      value=private_subnet.subnet_id)
    cdk.CfnOutput(self, 'VPCId', value=vpc.vpc_id)

    # Data bucket shared with the cluster.
    data_bucket = s3.Bucket(self, "DataRepository")
    # NOTE(review): output id keeps the historical 'DataRespository'
    # misspelling — renaming it would change the CloudFormation output key
    # that external consumers may already reference.
    cdk.CfnOutput(self, 'DataRespository', value=data_bucket.bucket_name)
    cloudtrail_bucket = s3.Bucket(self, "CloudTrailLogs")
    quickstart_bucket = s3.Bucket.from_bucket_name(self, 'QuickStartBucket',
                                                   'aws-quickstart')

    # Upload Bootstrap Script to that bucket
    bootstrap_script = assets.Asset(self,
                                    'BootstrapScript',
                                    path='scripts/bootstrap.sh')

    # Upload parallel cluster post_install_script to that bucket
    pcluster_post_install_script = assets.Asset(
        self,
        'PclusterPostInstallScript',
        path='scripts/post_install_script.sh')

    # Upload parallel cluster config to that bucket
    pcluster_config_script = assets.Asset(self,
                                          'PclusterConfigScript',
                                          path='scripts/config.ini')

    # Setup CloudTrail
    cloudtrail.Trail(self, 'CloudTrail', bucket=cloudtrail_bucket)

    # Create a Cloud9 instance.
    # Cloud9 doesn't have the ability to provide userdata, so SSM run
    # command is used later (via custom resources) to bootstrap it.
    cloud9_instance = cloud9.Ec2Environment(
        self,
        'ResearchWorkspace',
        vpc=vpc,
        instance_type=ec2.InstanceType(
            instance_type_identifier='c5.large'))
    cdk.CfnOutput(self,
                  'Research Workspace URL',
                  value=cloud9_instance.ide_url)

    # Create a keypair in lambda and store the private key in SecretsManager
    c9_createkeypair_role = iam.Role(
        self,
        'Cloud9CreateKeypairRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
    c9_createkeypair_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'service-role/AWSLambdaBasicExecutionRole'))
    # Add IAM permissions to the lambda role
    c9_createkeypair_role.add_to_policy(
        iam.PolicyStatement(
            actions=['ec2:CreateKeyPair', 'ec2:DeleteKeyPair'],
            resources=['*'],
        ))

    # Lambda for Cloud9 keypair
    c9_createkeypair_lambda = _lambda.Function(
        self,
        'C9CreateKeyPairLambda',
        runtime=_lambda.Runtime.PYTHON_3_6,
        handler='lambda_function.handler',
        timeout=cdk.Duration.seconds(300),
        role=c9_createkeypair_role,
        code=_lambda.Code.asset('functions/source/c9keypair'),
    )

    c9_createkeypair_provider = cr.Provider(
        self,
        "C9CreateKeyPairProvider",
        on_event_handler=c9_createkeypair_lambda)

    c9_createkeypair_cr = cfn.CustomResource(
        self,
        "C9CreateKeyPair",
        provider=c9_createkeypair_provider,
        properties={'ServiceToken': c9_createkeypair_lambda.function_arn})
    #c9_createkeypair_cr.node.add_dependency(instance_id)
    c9_ssh_private_key_secret = secretsmanager.CfnSecret(
        self,
        'SshPrivateKeySecret',
        secret_string=c9_createkeypair_cr.get_att_string('PrivateKey'))

    # The iam policy has a <REGION> parameter that needs to be replaced.
    # We do it programmatically so future versions of the synth'd stack
    # template include all regions.
    with open('iam/ParallelClusterUserPolicy.json') as json_file:
        data = json.load(json_file)
    for s in data['Statement']:
        if s['Sid'] == 'S3ParallelClusterReadOnly':
            s['Resource'] = []
            for r in region_info.RegionInfo.regions:
                s['Resource'].append(
                    'arn:aws:s3:::{0}-aws-parallelcluster*'.format(r.name))
    parallelcluster_user_policy = iam.CfnManagedPolicy(
        self,
        'ParallelClusterUserPolicy',
        policy_document=iam.PolicyDocument.from_json(data))

    # Cloud9 IAM Role (assumed by the workspace EC2 instance).
    cloud9_role = iam.Role(
        self,
        'Cloud9Role',
        assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'))
    cloud9_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AmazonSSMManagedInstanceCore'))
    cloud9_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name('AWSCloud9User'))
    cloud9_role.add_managed_policy(
        iam.ManagedPolicy.from_managed_policy_arn(
            self, 'AttachParallelClusterUserPolicy',
            parallelcluster_user_policy.ref))
    cloud9_role.add_to_policy(
        iam.PolicyStatement(resources=['*'],
                            actions=[
                                'ec2:DescribeInstances',
                                'ec2:DescribeVolumes', 'ec2:ModifyVolume'
                            ]))
    cloud9_role.add_to_policy(
        iam.PolicyStatement(resources=[c9_ssh_private_key_secret.ref],
                            actions=['secretsmanager:GetSecretValue']))
    cloud9_role.add_to_policy(
        iam.PolicyStatement(
            actions=["s3:Get*", "s3:List*"],
            resources=[
                "arn:aws:s3:::%s/*" % (data_bucket.bucket_name),
                "arn:aws:s3:::%s" % (data_bucket.bucket_name)
            ]))
    bootstrap_script.grant_read(cloud9_role)
    pcluster_post_install_script.grant_read(cloud9_role)
    pcluster_config_script.grant_read(cloud9_role)

    # Admin Group
    admin_group = iam.Group(self, 'AdminGroup')
    admin_group.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AdministratorAccess'))
    admin_group.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AWSCloud9Administrator'))

    # PowerUser Group
    poweruser_group = iam.Group(self, 'PowerUserGroup')
    poweruser_group.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name('PowerUserAccess'))
    poweruser_group.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AWSCloud9Administrator'))

    # HPC User — only created when the CreateUser condition below is true.
    user = iam.CfnUser(
        self,
        'Researcher',
        groups=[admin_group.node.default_child.ref],
        login_profile=iam.CfnUser.LoginProfileProperty(
            password_reset_required=True,
            password=cdk.SecretValue.cfn_parameter(password).to_string()))

    create_user = cdk.CfnParameter(self,
                                   "CreateUser",
                                   default="false",
                                   type="String",
                                   allowed_values=['true', 'false'
                                                   ]).value_as_string
    user_condition = cdk.CfnCondition(self,
                                      "UserCondition",
                                      expression=cdk.Fn.condition_equals(
                                          create_user, "true"))
    user.cfn_options.condition = user_condition

    cdk.CfnOutput(self,
                  'UserLoginUrl',
                  value="".join([
                      "https://", self.account,
                      ".signin.aws.amazon.com/console"
                  ]),
                  condition=user_condition)
    cdk.CfnOutput(self,
                  'UserName',
                  value=user.ref,
                  condition=user_condition)

    # Cloud9 Setup IAM Role (assumed by the setup Lambdas below).
    cloud9_setup_role = iam.Role(
        self,
        'Cloud9SetupRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
    cloud9_setup_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'service-role/AWSLambdaBasicExecutionRole'))
    # Allow pcluster to be run in bootstrap
    cloud9_setup_role.add_managed_policy(
        iam.ManagedPolicy.from_managed_policy_arn(
            self, 'AttachParallelClusterUserPolicySetup',
            parallelcluster_user_policy.ref))

    # Add IAM permissions to the lambda role
    cloud9_setup_role.add_to_policy(
        iam.PolicyStatement(
            actions=[
                'cloudformation:DescribeStackResources',
                'ec2:AssociateIamInstanceProfile',
                'ec2:AuthorizeSecurityGroupIngress',
                'ec2:DescribeInstances',
                'ec2:DescribeInstanceStatus',
                'ec2:DescribeInstanceAttribute',
                'ec2:DescribeIamInstanceProfileAssociations',
                'ec2:DescribeVolumes',
                # FIX: was the misspelled 'ec2:DesctibeVolumeAttribute',
                # which is not a real IAM action.
                'ec2:DescribeVolumeAttribute',
                'ec2:DescribeVolumesModifications',
                'ec2:DescribeVolumeStatus',
                'ssm:DescribeInstanceInformation',
                'ec2:ModifyVolume',
                'ec2:ReplaceIamInstanceProfileAssociation',
                'ec2:ReportInstanceStatus',
                'ssm:SendCommand',
                'ssm:GetCommandInvocation',
                's3:GetObject',
                'lambda:AddPermission',
                'lambda:RemovePermission',
                'events:PutRule',
                'events:DeleteRule',
                'events:PutTargets',
                'events:RemoveTargets',
                'cloud9:CreateEnvironmentMembership',
            ],
            resources=['*'],
        ))
    cloud9_setup_role.add_to_policy(
        iam.PolicyStatement(actions=['iam:PassRole'],
                            resources=[cloud9_role.role_arn]))
    cloud9_setup_role.add_to_policy(
        iam.PolicyStatement(
            actions=['lambda:AddPermission', 'lambda:RemovePermission'],
            resources=['*']))

    # Cloud9 Instance Profile
    c9_instance_profile = iam.CfnInstanceProfile(
        self, "Cloud9InstanceProfile", roles=[cloud9_role.role_name])

    # Lambda to add Instance Profile to Cloud9
    c9_instance_profile_lambda = _lambda.Function(
        self,
        'C9InstanceProfileLambda',
        runtime=_lambda.Runtime.PYTHON_3_6,
        handler='lambda_function.handler',
        timeout=cdk.Duration.seconds(900),
        role=cloud9_setup_role,
        code=_lambda.Code.asset('functions/source/c9InstanceProfile'),
    )

    c9_instance_profile_provider = cr.Provider(
        self,
        "C9InstanceProfileProvider",
        on_event_handler=c9_instance_profile_lambda,
    )

    instance_id = cfn.CustomResource(self,
                                     "C9InstanceProfile",
                                     provider=c9_instance_profile_provider,
                                     properties={
                                         'InstanceProfile':
                                         c9_instance_profile.ref,
                                         'Cloud9Environment':
                                         cloud9_instance.environment_id,
                                     })
    instance_id.node.add_dependency(cloud9_instance)

    # Lambda for Cloud9 Bootstrap
    c9_bootstrap_lambda = _lambda.Function(
        self,
        'C9BootstrapLambda',
        runtime=_lambda.Runtime.PYTHON_3_6,
        handler='lambda_function.handler',
        timeout=cdk.Duration.seconds(900),
        role=cloud9_setup_role,
        code=_lambda.Code.asset('functions/source/c9bootstrap'),
    )

    c9_bootstrap_provider = cr.Provider(
        self, "C9BootstrapProvider", on_event_handler=c9_bootstrap_lambda)

    c9_bootstrap_cr = cfn.CustomResource(
        self,
        "C9Bootstrap",
        provider=c9_bootstrap_provider,
        properties={
            'Cloud9Environment':
            cloud9_instance.environment_id,
            'BootstrapPath':
            's3://%s/%s' % (bootstrap_script.s3_bucket_name,
                            bootstrap_script.s3_object_key),
            # FIX: pass the parameter's resolved string value — previously
            # the CfnParameter construct object itself was passed, unlike
            # every other parameter-backed property here.
            'Config':
            config.value_as_string,
            'VPCID':
            vpc.vpc_id,
            'MasterSubnetID':
            vpc.public_subnets[0].subnet_id,
            'ComputeSubnetID':
            vpc.private_subnets[0].subnet_id,
            'PostInstallScriptS3Url':
            "".join([
                's3://', pcluster_post_install_script.s3_bucket_name, "/",
                pcluster_post_install_script.s3_object_key
            ]),
            'PostInstallScriptBucket':
            pcluster_post_install_script.s3_bucket_name,
            'S3ReadWriteResource':
            data_bucket.bucket_arn,
            'S3ReadWriteUrl':
            's3://%s' % (data_bucket.bucket_name),
            'KeyPairId':
            c9_createkeypair_cr.ref,
            'KeyPairSecretArn':
            c9_ssh_private_key_secret.ref,
            'UserArn':
            user.attr_arn,
            'PclusterVersion':
            pcluster_version.value_as_string
        })
    c9_bootstrap_cr.node.add_dependency(instance_id)
    c9_bootstrap_cr.node.add_dependency(c9_createkeypair_cr)
    c9_bootstrap_cr.node.add_dependency(c9_ssh_private_key_secret)
    c9_bootstrap_cr.node.add_dependency(data_bucket)

    # Budgets — only materialized when EnableBudget resolves to "true".
    enable_budget = cdk.CfnParameter(self,
                                     "EnableBudget",
                                     default="true",
                                     type="String",
                                     allowed_values=['true', 'false'
                                                     ]).value_as_string
    budget_properties = {
        'budgetType': "COST",
        'timeUnit': "ANNUALLY",
        'budgetLimit': {
            'amount':
            cdk.CfnParameter(
                self,
                'BudgetLimit',
                description=
                'The initial budget for this project in USD ($).',
                default=2000,
                type='Number').value_as_number,
            'unit':
            "USD",
        },
        'costFilters': None,
        'costTypes': {
            'includeCredit': False,
            'includeDiscount': True,
            'includeOtherSubscription': True,
            'includeRecurring': True,
            'includeRefund': True,
            'includeSubscription': True,
            'includeSupport': True,
            'includeTax': True,
            'includeUpfront': True,
            'useAmortized': False,
            'useBlended': False,
        },
        'plannedBudgetLimits': None,
        'timePeriod': None,
    }

    # Email subscriber alerted at 80% of actual spend.
    email = {
        'notification': {
            'comparisonOperator': "GREATER_THAN",
            'notificationType': "ACTUAL",
            'threshold': 80,
            'thresholdType': "PERCENTAGE",
        },
        'subscribers': [{
            'address':
            cdk.CfnParameter(
                self,
                'NotificationEmail',
                description=
                'This email address will receive billing alarm notifications when 80% of the budget limit is reached.',
                default='*****@*****.**').value_as_string,
            'subscriptionType':
            "EMAIL",
        }]
    }

    overall_budget = budgets.CfnBudget(
        self,
        "HPCBudget",
        budget=budget_properties,
        notifications_with_subscribers=[email],
    )
    overall_budget.cfn_options.condition = cdk.CfnCondition(
        self,
        "BudgetCondition",
        expression=cdk.Fn.condition_equals(enable_budget, "true"))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Visit-service stack.

    Creates the DynamoDB visits table, a custom resource that imports
    seed data, the Java petclinic Lambda behind a provisioned 'Prod'
    alias, and a CodeDeploy linear-canary deployment group.
    """
    super().__init__(scope, id, **kwargs)

    # Region/account-scoped ARNs used by the policies below (identical
    # strings to the inline concatenations they replace).
    logs_arn = "arn:aws:logs:" + core.Aws.REGION + ":" + core.Aws.ACCOUNT_ID + ":*"
    ddb_arn = "arn:aws:dynamodb:" + core.Aws.REGION + ":" + core.Aws.ACCOUNT_ID + ":*"

    # Permissions for the service Lambda: its own logs + table access.
    lambda_policies = [
        iam.PolicyStatement(
            actions=[
                "logs:CreateLogStream", "logs:PutLogEvents",
                "logs:CreateLogGroup"
            ],
            effect=iam.Effect.ALLOW,
            resources=[logs_arn],
        ),
        iam.PolicyStatement(
            actions=["dynamodb:*"],
            effect=iam.Effect.ALLOW,
            resources=[ddb_arn],
        ),
    ]

    table = _dynamodb.Table(
        self,
        'VisitTable',
        partition_key={
            'name': 'id',
            'type': _dynamodb.AttributeType.STRING
        },
        removal_policy=core.RemovalPolicy.DESTROY,
        read_capacity=5,
        write_capacity=5,
    )

    # Modify the config.js with CF custom resource.
    modify_policy = [
        iam.PolicyStatement(
            actions=["dynamodb:*"],
            effect=iam.Effect.ALLOW,
            resources=[ddb_arn],
        ),
    ]
    importer_fn = _lambda.SingletonFunction(
        self,
        "CustomResourceSingleton",
        uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
        code=self.custom_resource,
        handler="index.handler",
        timeout=core.Duration.seconds(300),
        runtime=_lambda.Runtime.PYTHON_3_7,
        initial_policy=modify_policy,
    )
    _cfn.CustomResource(
        self,
        "VisitDataImportCustomResource",
        provider=_cfn.CustomResourceProvider.lambda_(importer_fn),
        properties={"DynamoDBTable": table.table_name},
    )

    # Java Spring service fronted by Lambda.
    service_lambda = _lambda.Function(
        self,
        'ApiPetclinicVisitLambda',
        handler=
        'org.springframework.samples.petclinic.visits.StreamLambdaHandler::handleRequest',
        runtime=_lambda.Runtime.JAVA_8,
        code=self.lambda_code,
        memory_size=1024,
        timeout=core.Duration.seconds(300),
        initial_policy=lambda_policies,
        environment={
            "DYNAMODB_TABLE_NAME": table.table_name,
            "SERVER_SERVLET_CONTEXT_PATH": "/api/visit"
        },
    )

    # New version per deploy (timestamp-named), exposed via a 'Prod'
    # alias with provisioned concurrency.
    published_version = service_lambda.add_version(str(round(time.time())))
    prod_alias = _lambda.Alias(
        self,
        'ApiPetclinicVisitLambdaAlias',
        alias_name='Prod',
        version=published_version,
        provisioned_concurrent_executions=5,
    )

    # Shift traffic to new versions 10% per minute via CodeDeploy.
    _deploy.LambdaDeploymentGroup(
        self,
        'ApiPetclinicVisitDeploymentGroup',
        alias=prod_alias,
        deployment_config=_deploy.LambdaDeploymentConfig.
        LINEAR_10_PERCENT_EVERY_1_MINUTE,
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Custom resource that loads seed items into DynamoDB.

    A singleton Lambda (inlined from the project source tree) performs
    the Put/Update/Delete item calls; the load status is exposed through
    ``self.response``.

    :param scope: parent construct.
    :param id: construct id (also suffixes the Lambda function name).
    :param kwargs: forwarded verbatim as the custom resource properties.
    """
    super().__init__(scope, id)

    # Load the handler source so it can be inlined into the Lambda.
    try:
        with open(
                "data_loader_stacks/custom_resources/ddb_data_loader/lambda_src/index.py",
                encoding="utf-8",
                mode="r") as f:
            ddb_data_loader_fn_code = f.read()
    except OSError:
        print("Unable to read Lambda Function Code")
        raise

    # Least-privilege item-level permissions for the loader.
    role_stmt1 = _iam.PolicyStatement(effect=_iam.Effect.ALLOW,
                                      resources=["*"],
                                      actions=[
                                          "dynamodb:PutItem",
                                          "dynamodb:DeleteItem",
                                          "dynamodb:UpdateItem",
                                      ])
    role_stmt1.sid = "AllowLambdaToLoadItems"

    ddb_data_loader_fn = _lambda.SingletonFunction(
        self,
        "ddbDataLoaderSingleton",
        # FIX: dropped the pointless f-prefix — the string has no
        # placeholders, so it was a plain literal dressed as an f-string.
        uuid="mystique133-0e2efcd4-3a29-e896f670",
        code=_lambda.InlineCode(ddb_data_loader_fn_code),
        handler="index.lambda_handler",
        timeout=core.Duration.seconds(12),
        runtime=_lambda.Runtime.PYTHON_3_7,
        reserved_concurrent_executions=1,
        environment={
            "LOG_LEVEL": "INFO",
            "APP_ENV": "Production"
        },
        # FIX: description typo "DyanamoDB" -> "DynamoDB".
        description="Load Data into DynamoDB",
        function_name=f"ddbDataLoader-{id}")
    ddb_data_loader_fn.add_to_role_policy(role_stmt1)

    # Deliberately NO custom log group here: CloudFormation does not
    # clean one up reliably when the stack is deleted.

    ddb_data_loader = cfn.CustomResource(
        self,
        "ddb_data_loaderCustomResource",
        provider=cfn.CustomResourceProvider.lambda_(ddb_data_loader_fn),
        properties=kwargs,
    )
    # Status string reported back by the custom-resource handler.
    self.response = ddb_data_loader.get_att("data_load_status").to_string()