def allow_lambda_to_access_kinesis(self) -> None:
    '''Grant the streams-reader Lambda permission to put records into the
    Kinesis Firehose delivery streams.

    Attaches two managed policies to the Lambda execution role: one
    covering the "analytical" and "likes" streams, and a separate one for
    the "apirequests" stream. Both are built by the shared helper below
    (the original duplicated the statement/document/policy boilerplate).
    '''
    policy = self._build_firehose_put_policy(
        'sls-blog-lambda-to-kinesis-permissions',
        [
            self.firehose_analytical.delivery_stream_name,
            self.firehose_likes.delivery_stream_name,
        ],
    )
    self.api_stack.lambda_streams_reader.role.add_managed_policy(policy)

    policy_apirequests = self._build_firehose_put_policy(
        'sls-blog-lambda-to-kinesis-permissions-apirequests',
        [self.firehose_apirequests.delivery_stream_name],
    )
    self.api_stack.lambda_streams_reader.role.add_managed_policy(
        policy_apirequests)

def _build_firehose_put_policy(self, construct_id, stream_names):
    '''Build a ManagedPolicy allowing PutRecord/PutRecordBatch on the
    given Firehose delivery streams.

    :param construct_id: CDK construct id for the ManagedPolicy.
    :param stream_names: delivery stream *names* (ARNs are derived here).
    :return: the created ``aws_iam.ManagedPolicy``.
    '''
    statement = aws_iam.PolicyStatement(
        actions=[
            'firehose:PutRecord',
            'firehose:PutRecordBatch',
        ],
        effect=aws_iam.Effect.ALLOW,
        resources=[
            f'arn:aws:firehose:{self.env.region}:{self.env.account}:'
            f'deliverystream/{name}'
            for name in stream_names
        ],
    )
    return aws_iam.ManagedPolicy(
        self,
        construct_id,
        description='Permissions for a Lambda function to put records in '
                    'Kinesis Firehose Streams',
        document=aws_iam.PolicyDocument(statements=[statement]),
    )
def create_crawler_permissions(self):
    '''Create and return the IAM role assumed by the Glue crawler.

    The role combines the AWS-managed ``AWSGlueServiceRole`` policy with a
    custom policy granting object read/write under the bucket's
    ``kinesis*`` prefix.

    :return: the crawler ``iam.Role``.
    '''
    iam_glue_principal = iam.ServicePrincipal(
        service='glue.amazonaws.com', )

    # Object-level access is limited to the crawler's input prefix.
    iam_crawler_policy = iam.PolicyStatement(
        actions=["s3:GetObject", "s3:PutObject"],
        effect=iam.Effect.ALLOW,
        resources=[self.bucket.bucket_arn + '/kinesis*'])
    iam_crawler_policy_document = iam.PolicyDocument(
        statements=[iam_crawler_policy])
    crawler_policy = iam.ManagedPolicy(
        self,
        'clickstream_s3_permission',
        # Fixed description: the previous text was a garbled copy-paste
        # ("...put and get s3 datathe Glue "analytical" Database and Table").
        description='Permission for Glue to put and get S3 data under '
        'the "kinesis" prefix of the clickstream bucket',
        document=iam_crawler_policy_document,
    )
    crawler_role = iam.Role(
        self,
        'clickstream_crawler_role',
        assumed_by=iam_glue_principal,
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name='service-role/AWSGlueServiceRole'),
            crawler_policy
        ])
    return crawler_role
def __init__(self, scope: core.Construct, construct_id: str,
             **kwargs) -> None:
    '''Create one IAM ManagedPolicy per entry in ``config/ec2policy.yaml``.

    Expected YAML shape::

        Policies:
          <policy-name>:
            - Effect: Allow | Deny
              Action: [...]
              Resources: [...]

    :raises KeyError: if the file lacks a ``Policies`` mapping or a
        statement lacks ``Effect``/``Action``/``Resources``.
    '''
    super().__init__(scope, construct_id, **kwargs)

    with open('config/ec2policy.yaml') as f:
        # safe_load is sufficient for plain config data and cannot
        # instantiate arbitrary Python objects from the YAML.
        policies = yaml.safe_load(f)

    for name, statements in policies['Policies'].items():
        policy_statements = [
            iam.PolicyStatement(
                actions=statement['Action'],
                effect=(iam.Effect.ALLOW
                        if statement['Effect'] == 'Allow' else
                        iam.Effect.DENY),
                resources=statement['Resources'],
            ) for statement in statements
        ]
        # Pass the statements at construction time instead of calling
        # add_statements() once per statement afterwards.
        iam.ManagedPolicy(
            self,
            name,
            managed_policy_name=name,
            statements=policy_statements,
        )
def copy_from_assests_bucket_to_custom_bucket(self, construct_id, asset_bucket, file_name, s3_custom_bucket): asset_bucket_object = s3.Bucket.from_bucket_name( self, "AssetBucketObject", asset_bucket.s3_bucket_name) # Custom Resource Creation to Copy from Asset Bucket to Custom Bucket custom_resource_policy = AwsCustomResourcePolicy.from_sdk_calls( resources=[ f"{asset_bucket_object.bucket_arn}/*", f"{s3_custom_bucket.bucket_arn}/*" ]) custom_resource_lambda_role = _iam.Role( scope=self, id=f'{construct_id}-CustomResourceLambdaRole', assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com'), managed_policies=[ _iam.ManagedPolicy.from_aws_managed_policy_name( "service-role/AWSLambdaBasicExecutionRole"), _iam.ManagedPolicy( scope=self, id=f'{construct_id}-CustomResourceLambdaPolicy', managed_policy_name="AssetsBucketAccessPolicy", statements=[ _iam.PolicyStatement(resources=[ f"{asset_bucket_object.bucket_arn}/*", f"{s3_custom_bucket.bucket_arn}/*" ], actions=[ "s3:List*", "s3:PutObject", "s3:GetObject" ]) ]) ]) on_create = AwsSdkCall(action='copyObject', service='S3', physical_resource_id=PhysicalResourceId.of( f'{asset_bucket.s3_bucket_name}'), parameters={ "Bucket": s3_custom_bucket.bucket_name, "CopySource": asset_bucket.s3_bucket_name + '/' + asset_bucket.s3_object_key, "Key": file_name }) custom_resource_creation = AwsCustomResource( scope=self, id='CustomResourceSyncWithS3', policy=custom_resource_policy, log_retention=logs.RetentionDays.ONE_WEEK, on_create=on_create, on_update=on_create, role=custom_resource_lambda_role, timeout=cdk.Duration.seconds(300)) return custom_resource_creation
def get_managed_policy(self, kms_key_arn):
    '''Return a ManagedPolicy combining the S3 and KMS policy statements.

    NOTE(review): ``kms_key_arn`` is not used in this method — verify
    against callers. The policy name contains a typo ("polciy"); fixing
    it would replace the deployed resource, so it is kept as-is.
    '''
    statements = [
        IamService.__get_s3_policy_statement(self),
        IamService.__get_kms_statements(self),
    ]
    return iam.ManagedPolicy(
        self,
        'ssl-sqs-kms-managed-polciy',
        managed_policy_name='ssl-sqs-kms-managed-polciy',
        statements=statements,
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    '''Provision a 2-AZ VPC with one SSM-manageable EC2 instance in a
    private subnet and one in a public subnet.
    '''
    super().__init__(scope, id, **kwargs)

    # create VPC w/ public and private subnets in 2 AZs
    # this also creates NAT Gateways in our public subnets
    vpc = ec2.Vpc(self, "NAT_Vpc", max_azs=2)

    # define the IAM role that will allow the EC2 instance to communicate with SSM
    role = iam.Role(self,
                    "Role",
                    assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))

    # SECURITY FIX: the original created a NEW customer-managed policy
    # (confusingly named 'AmazonSSMManagedInstanceCore') that allowed
    # ALL actions on ALL resources — effectively admin access. Attach
    # the real AWS-managed policy instead:
    # arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore
    role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AmazonSSMManagedInstanceCore'))

    # define user data script to update server software
    ssma_user_data = ec2.UserData.for_linux()
    ssma_user_data.add_commands('sudo yum update -y')
    # define user data script to create metadata.sh script
    ssma_user_data.add_commands('sudo touch metadata.sh')
    ssma_user_data.add_commands('sudo chmod 777 metadata.sh')
    ssma_user_data.add_commands(
        "sudo echo 'curl http://169.254.169.254/latest/meta-data/$1' > metadata.sh"
    )
    ssma_user_data.add_commands("sudo echo 'VAR=' >> metadata.sh")
    ssma_user_data.add_commands("sudo echo 'echo $VAR' >> metadata.sh")

    # launch an EC2 instance in one of the private subnets
    private_instance = ec2.Instance(
        self,
        "PrivateInstance",
        vpc=vpc,
        instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                          ec2.InstanceSize.MICRO),
        machine_image=ec2.AmazonLinuxImage(),
        vpc_subnets={'subnet_type': ec2.SubnetType.PRIVATE},
        role=role,
        user_data=ssma_user_data)

    # launch an EC2 instance in one of the public subnets
    public_instance = ec2.Instance(
        self,
        "PublicInstance",
        vpc=vpc,
        instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                          ec2.InstanceSize.MICRO),
        machine_image=ec2.AmazonLinuxImage(),
        vpc_subnets={'subnet_type': ec2.SubnetType.PUBLIC},
        role=role,
        user_data=ssma_user_data)
def create_firehose_role(self):
    '''Create the IAM role assumed by the Kinesis Firehose delivery stream.

    Grants read access to the Glue "analytical" database/table, full
    access to the source Kinesis data stream, and S3 access to the
    destination bucket.

    :return: the Firehose ``iam.Role``.
    '''
    # Principal
    firehose_service_principal = iam.ServicePrincipal(
        service='firehose.amazonaws.com', )
    # Statement: Glue schema lookups used for record format conversion.
    iam_analytical_statement = iam.PolicyStatement(
        actions=[
            'glue:GetTable',
            'glue:GetTableVersion',
            'glue:GetTableVersions',
        ],
        effect=iam.Effect.ALLOW,
        resources=[
            self.glue_db_analytical.catalog_arn,
            self.glue_db_analytical.database_arn,
            self.glue_table_analytical.table_arn,
        ],
    )
    # Statement: source Kinesis data stream.
    iam_datastreams_firehose_statement = iam.PolicyStatement(
        actions=['kinesis:*'],
        effect=iam.Effect.ALLOW,
        resources=[self.stream_lambda.kinesis_stream.stream_arn])
    # Statement: destination bucket. FIX: object-level actions
    # (PutObject etc.) only match resources of the form <bucket-arn>/*;
    # the original listed just the bucket ARN, which covers only
    # bucket-level operations and would deny object delivery.
    iam_s3_firehose_statement = iam.PolicyStatement(
        actions=['s3:*'],
        effect=iam.Effect.ALLOW,
        resources=[
            self.bucket.bucket_arn,
            self.bucket.bucket_arn + '/*',
        ])
    # Document with previous statements
    analytical_policy_document = iam.PolicyDocument(statements=[
        iam_analytical_statement, iam_datastreams_firehose_statement,
        iam_s3_firehose_statement
    ], )
    # Creation of a policy using the document
    analytical_policy = iam.ManagedPolicy(
        self,
        'sls-blog-analytical-glue-permissions',
        description='Permissions for a Kinesis Firehose Stream to access '
        'the Glue "analytical" Database and Table',
        document=analytical_policy_document,
    )
    # Creating the Role using the policy
    iam_role_firehose_analytical = iam.Role(
        self,
        'self-firehose-to-s3',
        assumed_by=firehose_service_principal,
        managed_policies=[
            analytical_policy,
        ],
    )
    return iam_role_firehose_analytical
def init_bot_group(self):
    '''Create the IAM group whose members may inspect and update the
    bot's ECS service.

    :return: the created ``iam.Group`` with its start/stop policy attached.
    '''
    allowed_actions = (
        "ecs:ListClusters",
        "ecs:ListServices",
        "ecs:DescribeServices",
        "ecs:ListTasks",
        "ecs:DescribeTasks",
        "ecs:UpdateService",
        "ec2:DescribeNetworkInterfaces",
    )
    statement = iam.PolicyStatement(effect=iam.Effect.ALLOW)
    statement.add_actions(*allowed_actions)
    statement.add_all_resources()

    bot_group = iam.Group(self, "minebot-group")
    start_stop_policy = iam.ManagedPolicy(self,
                                          "minebot-start-stop-policy",
                                          statements=[statement])
    bot_group.add_managed_policy(start_stop_policy)
    return bot_group
def _add_codebuild_project_runner_permissions(self, role: aws_iam.Role):
    '''Attach the build-executor managed policy to ``role``.

    Grants ``CODE_BUILD_AMPLIFY_ACTIONS`` on all resources to the
    CodeBuild role that executes builds.
    '''
    build_exec_policy = aws_iam.ManagedPolicy(
        self,
        "AmplifyAndroidBuildExecutorPolicy",
        # Plain string: the original used an f-string with no placeholders.
        managed_policy_name="AmplifyAndroidBuildExecutorPolicy",
        description=
        "Policy used by the CodeBuild role that executes builds.",
        statements=[
            aws_iam.PolicyStatement(
                actions=self.CODE_BUILD_AMPLIFY_ACTIONS,
                effect=aws_iam.Effect.ALLOW,
                resources=["*"]),
        ])
    build_exec_policy.attach_to_role(role)
def _add_devicefarm_test_runner_permissions_to_role(
        self, role: aws_iam.Role):
    '''Attach the DeviceFarm test-runner managed policy to ``role``.

    Grants ``DEVICE_FARM_TEST_RUNNER_ACTIONS`` on all resources so the
    pipeline can trigger DeviceFarm test runs.
    '''
    df_runner_policy = aws_iam.ManagedPolicy(
        self,
        "AmplifyAndroidDeviceFarmTestRunnerPolicy",
        # Plain string: the original used an f-string with no placeholders.
        managed_policy_name="AmplifyAndroidDeviceFarmTestRunnerPolicy",
        description=
        "Policy used by the CodePipeline to trigger DeviceFarm test runs.",
        statements=[
            aws_iam.PolicyStatement(
                actions=self.DEVICE_FARM_TEST_RUNNER_ACTIONS,
                effect=aws_iam.Effect.ALLOW,
                resources=["*"]),
        ])
    df_runner_policy.attach_to_role(role)
def __init__(self, scope: core.App, id: str, props, **kwargs) -> None:
    '''Create the Maven release publisher and its secrets-access policy.

    :param props: dict with required keys ``github_source`` (mapping with
        ``owner``, ``repo``, ``base_branch``),
        ``codebuild_project_name_prefix`` and ``create_webhooks``;
        optional ``buildspec_path``.
    :raises RuntimeError: if a required key is missing.
    '''
    super().__init__(scope, id, **kwargs)

    # FIX: validate every key that is read unconditionally below; the
    # original only checked 'github_source', so a missing prefix or
    # create_webhooks surfaced as a bare KeyError.
    required_props = [
        'github_source',
        'codebuild_project_name_prefix',
        'create_webhooks',
    ]
    for prop in required_props:
        if prop not in props:
            raise RuntimeError(f"Parameter {prop} is required.")

    codebuild_project_name_prefix = props['codebuild_project_name_prefix']
    github_source = props['github_source']
    owner = github_source['owner']
    repo = github_source['repo']
    base_branch = github_source['base_branch']
    buildspec_path = props.get('buildspec_path')  # optional
    create_webhooks = props['create_webhooks']

    # Allow the build role to read the signing/Sonatype secrets used when
    # uploading artifacts.
    policy = aws_iam.ManagedPolicy(
        self,
        "SecretsAccessPolicy",
        managed_policy_name=
        f"{codebuild_project_name_prefix}-SecretsAccessPolicy",
        description=
        "Policy used by the CodeBuild role to access secrets when uploading to Sonatype",
        statements=[
            aws_iam.PolicyStatement(
                actions=["secretsmanager:GetSecretValue"],
                effect=aws_iam.Effect.ALLOW,
                resources=[
                    f"arn:aws:secretsmanager:{self.region}:{self.account}:secret:awsmobilesdk/android/signing*",
                    f"arn:aws:secretsmanager:{self.region}:{self.account}:secret:awsmobilesdk/android/sonatype*"
                ])
        ])
    publisher = MavenPublisher(
        self,
        "ReleasePublisher",
        project_name=f"{codebuild_project_name_prefix}-ReleasePublisher",
        github_owner=owner,
        github_repo=repo,
        base_branch=base_branch,
        buildspec_path=buildspec_path,
        create_webhooks=create_webhooks)
    policy.attach_to_role(publisher.role)
def provision_iam_policy(self, name: str):
    '''Create the S3 managed policy for this construct and store it on
    ``self.policy``.

    Combines bucket-level read actions (on all buckets) with the
    pre-built ``self.s3_api_statement``.

    :param name: prefix used to form the managed policy name.
    '''
    bucket_read_statement = iam.PolicyStatement(
        actions=[
            "s3:ListBucket",
            "s3:GetBucketLocation",
            "s3:ListBucketMultipartUploads",
        ],
        resources=["*"],
    )
    self.policy = iam.ManagedPolicy(
        self.scope,
        "S3",
        managed_policy_name=f"{name}-S3",
        statements=[bucket_read_statement, self.s3_api_statement],
    )
def _create_codebuild_project(self, id: str):
    '''Create the CodeBuild PipelineProject that builds the APKs and
    attach the build-executor policy to its role.

    :param id: CDK construct id for the project.
    :return: the created ``aws_codebuild.PipelineProject``.
    '''
    pipeline_project = aws_codebuild.PipelineProject(
        self,
        id,
        environment=aws_codebuild.BuildEnvironment(
            build_image=aws_codebuild.LinuxBuildImage.AMAZON_LINUX_2_3,
            # presumably required for Docker-based build steps — TODO confirm
            privileged=True,
            compute_type=aws_codebuild.ComputeType.LARGE),
        build_spec=aws_codebuild.BuildSpec.from_source_filename(
            filename='scripts/apk-builder-buildspec.yml'))
    build_exec_policy = aws_iam.ManagedPolicy(
        self,
        "AmplifyAndroidBuildExecutorPolicy",
        # Plain string: the original used an f-string with no placeholders.
        managed_policy_name="AmplifyAndroidBuildExecutorPolicy",
        description=
        "Policy used by the CodeBuild role that executes builds.",
        statements=[
            aws_iam.PolicyStatement(
                actions=self.CODE_BUILD_AMPLIFY_ACTIONS,
                effect=aws_iam.Effect.ALLOW,
                resources=["*"]),
        ])
    build_exec_policy.attach_to_role(pipeline_project.role)
    return pipeline_project
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    '''IAM/Lambda lockdown incident-response stack (InLam01).

    Creates a deny-all policy for IAM and Lambda operations plus a
    "lockdown" Lambda that attaches it to offending users, triggered by
    a CloudWatch event rule on Lambda API activity. Requires the
    ``webhook_url`` context variable; if absent the stack only logs an
    error.
    '''
    super().__init__(scope, id, **kwargs)
    SLACK_WEBHOOK_URL = self.node.try_get_context("webhook_url")
    if not SLACK_WEBHOOK_URL:
        logger.error(
            f"Required context variables for {id} were not provided!")
    else:
        # Create explicit deny iam
        # (construct ids below are plain strings; the original used
        # f-strings with no placeholders)
        deny_iam = iam.ManagedPolicy(
            self,
            "InLam01DenyPolicy",
            managed_policy_name="lambdaDeny",
            statements=[
                # Deny all IAM actions on every IAM resource type.
                iam.PolicyStatement(
                    effect=iam.Effect.DENY,
                    actions=["iam:*"],
                    resources=[
                        "arn:aws:iam::*:saml-provider/*",
                        "arn:aws:iam::*:policy/*",
                        "arn:aws:iam::*:oidc-provider/*",
                        "arn:aws:iam::*:instance-profile/*",
                        "arn:aws:iam::*:user/*",
                        "arn:aws:iam::*:role/*",
                        "arn:aws:iam::*:server-certificate/*",
                        "arn:aws:iam::*:sms-mfa/*",
                        "arn:aws:iam::*:group/*",
                        "arn:aws:iam::*:mfa/*/*"
                    ]),
                # Deny account-level IAM/Lambda actions that only match
                # resource "*".
                iam.PolicyStatement(
                    effect=iam.Effect.DENY,
                    actions=[
                        "iam:ListPolicies",
                        "iam:DeleteAccountPasswordPolicy",
                        "iam:ListSAMLProviders", "lambda:ListFunctions",
                        "iam:ListServerCertificates",
                        "iam:ListPoliciesGrantingServiceAccess",
                        "iam:ListRoles", "lambda:GetAccountSettings",
                        "lambda:CreateEventSourceMapping",
                        "iam:ListVirtualMFADevices",
                        "iam:SetSecurityTokenServicePreferences",
                        "iam:ListOpenIDConnectProviders",
                        "iam:UpdateAccountPasswordPolicy",
                        "iam:CreateAccountAlias",
                        "lambda:ListEventSourceMappings",
                        "iam:ListAccountAliases", "iam:ListUsers",
                        "lambda:ListLayerVersions", "lambda:ListLayers",
                        "iam:ListGroups", "iam:DeleteAccountAlias",
                        "iam:GetAccountSummary"
                    ],
                    resources=["*"]),
                # Deny all Lambda actions on Lambda resource types.
                iam.PolicyStatement(
                    effect=iam.Effect.DENY,
                    actions=["lambda:*"],
                    resources=[
                        "arn:aws:lambda:*:*:function:*",
                        "arn:aws:lambda:*:*:layer:*",
                        "arn:aws:lambda:*:*:event-source-mapping:*",
                        "arn:aws:lambda:*:*:function:*:*",
                    ]),
            ])
        # Create lambda that attaches explicit deny
        lambda_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks",
                                       "in_lam_01")
        lockdown_lambda = _lambda.Function(
            self,
            "InLam01LockdownFunction",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="response_lambda.lambda_handler",
            code=_lambda.Code.from_asset(lambda_dir_path),
            environment={
                "webhook_url": SLACK_WEBHOOK_URL,
                "lambdaDenyIAM": deny_iam.managed_policy_arn
            })
        # The lockdown Lambda needs to attach the deny policy to users.
        lockdown_lambda.add_to_role_policy(
            iam.PolicyStatement(
                actions=[
                    "iam:AttachUserPolicy",
                ],
                effect=iam.Effect.ALLOW,
                resources=["*"],
            ))
        # set up cloudwatch event for lambda invokes
        cw_hook = cw_event.Rule(
            self,
            "InLam01Event",
            description="Monitor Lambda Invokes",
            event_pattern=cw_event.EventPattern(source=["aws.lambda"]),
            rule_name="lambdaMonitor",
            targets=[targets.LambdaFunction(handler=lockdown_lambda)])
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    '''Aurora/RDS incident-response stack (InAur02).

    Creates an explicit RDS/IAM deny policy, a response Lambda that can
    attach it to offending users, an SNS notification topic, and an
    event rule that fires the Lambda + topic on any RDS API call seen in
    CloudTrail. All four context variables below are required; if any is
    missing the stack only logs an error and creates nothing.
    '''
    super().__init__(scope, id, **kwargs)
    CLUSTER_NAME = self.node.try_get_context("cluster_name")
    NOTIFY_EMAIL = self.node.try_get_context("notify_email")
    SLACK_WEBHOOK_URL = self.node.try_get_context("webhook_url")
    WHITE_LIST_GROUP = self.node.try_get_context("white_list_group")
    if (not CLUSTER_NAME or not NOTIFY_EMAIL or not SLACK_WEBHOOK_URL
            or not WHITE_LIST_GROUP):
        logger.error(
            f"Required context variables for {id} were not provided!")
    else:
        # Create explicit deny policy
        # (attached to users by the response Lambda during lockdown)
        policy = iam.ManagedPolicy(
            self,
            "InAur02RdsDenyPolicy",
            managed_policy_name="InAur02RdsDenyPolicy",
            statements=[
                iam.PolicyStatement(
                    actions=["rds:*", "iam:*"],
                    effect=iam.Effect.DENY,
                    resources=["*"],
                )
            ],
        )
        # Create lambda function
        lambda_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks",
                                       "in_aur_02")
        lambda_func = _lambda.Function(
            self,
            "InAur02ResponseFunction",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="response_lambda.lambda_handler",
            code=_lambda.Code.from_asset(lambda_dir_path),
            environment={
                "webhook_url": SLACK_WEBHOOK_URL,
                "policy_arn": policy.managed_policy_arn,
                "cluster_name": CLUSTER_NAME,
                "white_list_group": WHITE_LIST_GROUP,
            },
        )
        # Assign permissions to response lambda
        # (attach the deny policy and inspect the whitelist group)
        lambda_func.add_to_role_policy(
            iam.PolicyStatement(
                actions=["iam:AttachUserPolicy", "iam:GetGroup"],
                effect=iam.Effect.ALLOW,
                resources=["*"],
            ))
        # Create new SNS topic
        topic = sns.Topic(self, "InAur02DetectionTopic")
        # Add email subscription
        topic.add_subscription(subs.EmailSubscription(NOTIFY_EMAIL))
        # Create new event rule to trigger lambda
        # when there are AWS RDS API calls
        events.Rule(
            self,
            "InAur02DetectionEventRule",
            event_pattern=events.EventPattern(
                source=["aws.rds"],
                detail_type=["AWS API Call via CloudTrail"],
                detail={"eventSource": ["rds.amazonaws.com"]},
            ),
            targets=[
                targets.LambdaFunction(handler=lambda_func),
                targets.SnsTopic(topic),
            ],
        )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    '''S3 write-monitoring incident-response stack for the
    ``socialistir-prod`` bucket.

    Creates allow/deny write policies and developer groups, a CloudTrail
    trail for S3 write events, an SNS notification topic, an event rule
    matching S3 object-level API calls, and a response Lambda wired to
    that rule.

    FIX: the original added the identical iam/organizations
    PolicyStatement to the response Lambda's role twice; the duplicate
    has been removed.
    '''
    super().__init__(scope, id, **kwargs)

    # Policy granting developers write access to the prod bucket.
    custom_allow_policy = iam.ManagedPolicy(
        self,
        "socialistir-custom-shub-write",
        managed_policy_name="socialistir-custom-shub-write",
        statements=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "s3:PutAnalyticsConfiguration",
                    "s3:PutAccelerateConfiguration",
                    "s3:DeleteObjectVersion", "s3:RestoreObject",
                    "s3:CreateBucket", "s3:ReplicateObject",
                    "s3:PutEncryptionConfiguration",
                    "s3:DeleteBucketWebsite", "s3:AbortMultipartUpload",
                    "s3:PutLifecycleConfiguration", "s3:DeleteObject",
                    "s3:DeleteBucket", "s3:PutBucketVersioning",
                    "s3:PutMetricsConfiguration",
                    "s3:PutReplicationConfiguration",
                    "s3:PutObjectLegalHold", "s3:PutBucketCORS",
                    "s3:PutInventoryConfiguration", "s3:PutObject",
                    "s3:PutBucketNotification", "s3:PutBucketWebsite",
                    "s3:PutBucketRequestPayment", "s3:PutObjectRetention",
                    "s3:PutBucketLogging",
                    "s3:PutBucketObjectLockConfiguration",
                    "s3:ReplicateDelete"
                ],
                resources=[
                    "arn:aws:s3:::socialistir-prod",
                    "arn:aws:s3:::socialistir-prod/*"
                ])
        ])

    # Mirror deny policy (attached during lockdown to revoke writes).
    custom_deny_policy = iam.ManagedPolicy(
        self,
        "S3-Custom-Shub-Deny_Write",
        managed_policy_name="S3-Custom-Shub-Deny_Write",
        statements=[
            iam.PolicyStatement(
                effect=iam.Effect.DENY,
                actions=[
                    "s3:PutAnalyticsConfiguration",
                    "s3:PutAccelerateConfiguration",
                    "s3:PutMetricsConfiguration",
                    "s3:PutReplicationConfiguration", "s3:CreateBucket",
                    "s3:PutBucketCORS", "s3:PutInventoryConfiguration",
                    "s3:PutEncryptionConfiguration",
                    "s3:PutBucketNotification", "s3:DeleteBucketWebsite",
                    "s3:PutBucketWebsite", "s3:PutBucketRequestPayment",
                    "s3:PutBucketLogging", "s3:PutLifecycleConfiguration",
                    "s3:PutBucketObjectLockConfiguration",
                    "s3:DeleteBucket", "s3:PutBucketVersioning",
                    "s3:ReplicateObject", "s3:PutObject",
                    "s3:AbortMultipartUpload", "s3:PutObjectRetention",
                    "s3:DeleteObjectVersion", "s3:RestoreObject",
                    "s3:PutObjectLegalHold", "s3:DeleteObject",
                    "s3:ReplicateDelete"
                ],
                resources=[
                    "arn:aws:s3:::socialistir-prod",
                    "arn:aws:s3:::socialistir-prod/*"
                ])
        ])

    devgroup1 = iam.Group(self,
                          "Developer-socialistir",
                          group_name="Developer-socialistir",
                          managed_policies=[custom_allow_policy])
    devgroup2 = iam.Group(self, "Developer-teamA",
                          group_name="Developer-teamA")

    ############ This section should be executed only once in lifetime as ############################################
    ############ AWS CDK does not support destion of S3 buckects yet and will ############################################
    ############ throw errors on subsequent deploy or destruct or rollback ############################################
    ############ S3 bucts are global across all AWS accounts ############################################
    # bucket = s3.Bucket(self, id='socialistir-prod', bucket_name='socialistir-prod', versioned=True, website_error_document='index.html', website_index_document='index.html')
    trail = cloudtrail.Trail(self, "S3-Write-Operation-Trail")
    trail.add_s3_event_selector(
        ["arn:aws:s3:::socialistir-prod/"],
        include_management_events=True,
        read_write_type=cloudtrail.ReadWriteType.WRITE_ONLY)
    # ######################################################################################################################

    topic = sns.Topic(self,
                      "S3-Notification-Write",
                      topic_name="S3-Notification-Write")
    topic.add_subscription(subs.EmailSubscription('*****@*****.**'))

    # Event pattern matching object-level S3 API calls on the prod bucket.
    ep = {
        "source": ["aws.s3"],
        "detail": {
            "eventSource": ["s3.amazonaws.com"],
            "eventName": [
                "ListObjects", "ListObjectVersions", "PutObject",
                "GetObject", "HeadObject", "CopyObject", "GetObjectAcl",
                "PutObjectAcl", "CreateMultipartUpload", "ListParts",
                "UploadPart", "CompleteMultipartUpload",
                "AbortMultipartUpload", "UploadPartCopy", "RestoreObject",
                "DeleteObject", "DeleteObjects", "GetObjectTorrent",
                "SelectObjectContent", "PutObjectLockRetention",
                "PutObjectLockLegalHold", "GetObjectLockRetention",
                "GetObjectLockLegalHold"
            ],
            "requestParameters": {
                "bucketName": ["socialistir-prod"]
            }
        }
    }
    rule = events.Rule(self,
                       "Shub-s3",
                       description='Rule created by CDK for S3 monitoring',
                       enabled=True,
                       rule_name="Shub-s3",
                       event_pattern=ep)

    lambda_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks",
                                   "in_s3_01")
    response_lambda = _lambda.Function(
        self,
        "S3WriteIR",
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler="lambda_function.lambda_handler",
        code=_lambda.Code.from_asset(lambda_dir_path),
        function_name="S3WriteIR")

    # IAM + Organizations read access for the responder (previously this
    # exact statement was added twice).
    response_lambda.add_to_role_policy(
        iam.PolicyStatement(
            actions=[
                "iam:*", "organizations:DescribeAccount",
                "organizations:DescribeOrganization",
                "organizations:DescribeOrganizationalUnit",
                "organizations:DescribePolicy",
                "organizations:ListChildren",
                "organizations:ListParents",
                "organizations:ListPoliciesForTarget",
                "organizations:ListRoots", "organizations:ListPolicies",
                "organizations:ListTargetsForPolicy"
            ],
            effect=iam.Effect.ALLOW,
            resources=["*"],
        ))
    response_lambda.add_to_role_policy(
        iam.PolicyStatement(
            actions=["s3:*"],
            effect=iam.Effect.ALLOW,
            resources=["*"],
        ))
    response_lambda.add_to_role_policy(
        iam.PolicyStatement(
            actions=["sns:*"],
            effect=iam.Effect.ALLOW,
            resources=["*"],
        ))
    rule.add_target(event_target.LambdaFunction(response_lambda))
def create_service_role(self) -> iam.Role:
    '''Create the EMR Studio service role.

    The role is assumable by elasticmapreduce.amazonaws.com and carries a
    single inline-constructed managed policy. Most statements are scoped
    by the ``for-use-with-amazon-emr-managed-policies`` tag (on either the
    resource or the request, matching the EMR Studio service-role model);
    each statement's ``sid`` names its purpose.

    :return: the created ``iam.Role``.
    '''
    return iam.Role(
        self,
        "EMRStudioServiceRole",
        assumed_by=iam.ServicePrincipal("elasticmapreduce.amazonaws.com"),
        managed_policies=[
            iam.ManagedPolicy(
                self,
                "EMRStudioServiceRolePolicy",
                statements=[
                    # --- EMR cluster visibility -------------------------
                    iam.PolicyStatement(
                        sid="AllowEMRReadOnlyActions",
                        actions=[
                            "elasticmapreduce:ListInstances",
                            "elasticmapreduce:DescribeCluster",
                            "elasticmapreduce:ListSteps",
                        ],
                        resources=["*"],
                    ),
                    # --- ENI management, gated on the EMR resource tag --
                    iam.PolicyStatement(
                        sid="AllowEC2ENIActionsWithEMRTags",
                        actions=[
                            "ec2:CreateNetworkInterfacePermission",
                            "ec2:DeleteNetworkInterface",
                        ],
                        resources=[
                            cdk.Stack.format_arn(
                                self,
                                service="ec2",
                                resource="network-interface",
                                resource_name="*",
                            )
                        ],
                        conditions={
                            "StringEquals": {
                                "aws:ResourceTag/for-use-with-amazon-emr-managed-policies":
                                "true"
                            }
                        },
                    ),
                    # Untagged: attribute changes across instance/ENI/SG.
                    iam.PolicyStatement(
                        sid="AllowEC2ENIAttributeAction",
                        actions=["ec2:ModifyNetworkInterfaceAttribute"],
                        resources=[
                            cdk.Stack.format_arn(
                                self,
                                service="ec2",
                                resource=name,
                                resource_name="*",
                            ) for name in [
                                "instance",
                                "network-interface",
                                "security-group",
                            ]
                        ],
                    ),
                    # --- Security-group lifecycle, tag-gated ------------
                    iam.PolicyStatement(
                        sid="AllowEC2SecurityGroupActionsWithEMRTags",
                        actions=[
                            "ec2:AuthorizeSecurityGroupEgress",
                            "ec2:AuthorizeSecurityGroupIngress",
                            "ec2:RevokeSecurityGroupEgress",
                            "ec2:RevokeSecurityGroupIngress",
                            "ec2:DeleteNetworkInterfacePermission",
                        ],
                        resources=["*"],
                        conditions={
                            "StringEquals": {
                                "aws:ResourceTag/for-use-with-amazon-emr-managed-policies":
                                "true"
                            }
                        },
                    ),
                    # Creation is gated on the *request* tag instead.
                    iam.PolicyStatement(
                        sid=
                        "AllowDefaultEC2SecurityGroupsCreationWithEMRTags",
                        actions=["ec2:CreateSecurityGroup"],
                        resources=[
                            cdk.Stack.format_arn(
                                self,
                                service="ec2",
                                resource="security-group",
                                resource_name="*",
                            )
                        ],
                        conditions={
                            "StringEquals": {
                                "aws:RequestTag/for-use-with-amazon-emr-managed-policies":
                                "true"
                            }
                        },
                    ),
                    iam.PolicyStatement(
                        sid=
                        "AllowDefaultEC2SecurityGroupsCreationInVPCWithEMRTags",
                        actions=["ec2:CreateSecurityGroup"],
                        resources=[
                            cdk.Stack.format_arn(
                                self,
                                service="ec2",
                                resource="vpc",
                                resource_name="*",
                            )
                        ],
                        conditions={
                            "StringEquals": {
                                "aws:ResourceTag/for-use-with-amazon-emr-managed-policies":
                                "true"
                            }
                        },
                    ),
                    # Tagging only during CreateSecurityGroup itself.
                    iam.PolicyStatement(
                        sid=
                        "AllowAddingEMRTagsDuringDefaultSecurityGroupCreation",
                        actions=["ec2:CreateTags"],
                        resources=[
                            cdk.Stack.format_arn(
                                self,
                                service="ec2",
                                resource="security-group",
                                resource_name="*",
                            )
                        ],
                        conditions={
                            "StringEquals": {
                                "aws:RequestTag/for-use-with-amazon-emr-managed-policies":
                                "true",
                                "ec2:CreateAction": "CreateSecurityGroup",
                            }
                        },
                    ),
                    # --- ENI creation, tag-gated ------------------------
                    iam.PolicyStatement(
                        sid="AllowEC2ENICreationWithEMRTags",
                        actions=["ec2:CreateNetworkInterface"],
                        resources=[
                            cdk.Stack.format_arn(
                                self,
                                service="ec2",
                                resource="network-interface",
                                resource_name="*",
                            )
                        ],
                        conditions={
                            "StringEquals": {
                                "aws:RequestTag/for-use-with-amazon-emr-managed-policies":
                                "true"
                            }
                        },
                    ),
                    iam.PolicyStatement(
                        sid=
                        "AllowEC2ENICreationInSubnetAndSecurityGroupWithEMRTags",
                        actions=["ec2:CreateNetworkInterface"],
                        resources=[
                            cdk.Stack.format_arn(
                                self,
                                service="ec2",
                                resource=name,
                                resource_name="*",
                            ) for name in ["subnet", "security-group"]
                        ],
                        conditions={
                            "StringEquals": {
                                "aws:ResourceTag/for-use-with-amazon-emr-managed-policies":
                                "true"
                            }
                        },
                    ),
                    # Tagging only during CreateNetworkInterface itself.
                    iam.PolicyStatement(
                        sid="AllowAddingTagsDuringEC2ENICreation",
                        actions=["ec2:CreateTags"],
                        resources=[
                            cdk.Stack.format_arn(
                                self,
                                service="ec2",
                                resource="network-interface",
                                resource_name="*",
                            )
                        ],
                        conditions={
                            "StringEquals": {
                                "ec2:CreateAction":
                                "CreateNetworkInterface"
                            }
                        },
                    ),
                    # --- Read-only describes ----------------------------
                    iam.PolicyStatement(
                        sid="AllowEC2ReadOnlyActions",
                        actions=[
                            "ec2:DescribeSecurityGroups",
                            "ec2:DescribeNetworkInterfaces",
                            "ec2:DescribeTags",
                            "ec2:DescribeInstances",
                            "ec2:DescribeSubnets",
                            "ec2:DescribeVpcs",
                        ],
                        resources=["*"],
                    ),
                    iam.PolicyStatement(
                        sid="AllowSecretsManagerReadOnlyActionsWithEMRTags",
                        actions=["secretsmanager:GetSecretValue"],
                        resources=[
                            cdk.Stack.format_arn(
                                self,
                                service="secretsmanager",
                                resource="secret",
                                sep=":",
                                resource_name="*",
                            )
                        ],
                        conditions={
                            "StringEquals": {
                                "aws:ResourceTag/for-use-with-amazon-emr-managed-policies":
                                "true"
                            }
                        },
                    ),
                    # --- S3 access (any bucket in the partition) --------
                    iam.PolicyStatement(
                        sid="S3permission",
                        actions=[
                            "s3:PutObject",
                            "s3:GetObject",
                            "s3:GetEncryptionConfiguration",
                            "s3:ListBucket",
                            "s3:DeleteObject",
                        ],
                        resources=["arn:aws:s3:::*"],
                    ),
                ],
            )
        ],
    )
def __init__(self, scope: core.Construct, construct_id: str,
             **kwargs) -> None:
    '''Deploy the unicorn API as a load-balanced Fargate service and
    create a user policy for cloud-debugging that service.

    Builds the Docker image from ``./unicorn_api_service``, deploys it
    behind an ALB, attaches the standard ECS execution and SSM managed
    policies, and assembles a ManagedPolicy granting the permissions a
    developer needs to run "cloud debug" sessions against the service.
    '''
    super().__init__(scope, construct_id, **kwargs)

    # Build the service image from the local Dockerfile directory.
    container_image = aws_ecr_assets.DockerImageAsset(
        self, 'dockerImage', directory='./unicorn_api_service')

    # noinspection PyTypeChecker
    unicorn_service = ecs_patterns.ApplicationLoadBalancedFargateService(
        self,
        'unicornService',
        memory_limit_mib=1024,
        cpu=512,
        task_image_options={
            'image':
            ecs.ContainerImage.from_docker_image_asset(
                asset=container_image),
            'container_port':
            80,
            'enable_logging':
            True,
            'environment': {
                'FLASK_DEBUG': '1',
                'FLASK_ENV': 'development',
                'FLASK_APP': '/app/app.py',
                'PYTHONUNBUFFERED': '1'
            }
        })

    # Standard execution-role policy (pull images, write logs).
    unicorn_service.task_definition.execution_role.add_managed_policy(
        iam.ManagedPolicy.from_managed_policy_arn(
            self,
            'ecsExecutionRole',
            managed_policy_arn=
            'arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy'
        ))
    unicorn_service.target_group.configure_health_check(path="/health")
    # SSM access on the task role (e.g. for session-based debugging).
    unicorn_service.task_definition.task_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AmazonSSMManagedInstanceCore'))

    # Developer policy for cloud-debug sessions against this service;
    # statements below cover ECS service mutation, role passing, the
    # debug S3 bucket, read-only discovery, log/target-group/session
    # management, and autoscaling target registration.
    user_policy = iam.ManagedPolicy(
        self,
        'userPolicy',
        document=iam.PolicyDocument(statements=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=['ecs:UpdateService'],
                resources=[
                    'arn:aws:ecs:*:*:service/*/cloud-debug-*',
                    unicorn_service.service.service_arn
                ]),
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                actions=[
                                    'iam:GetRole', 'iam:ListRoles',
                                    'iam:SimulatePrincipalPolicy'
                                ],
                                resources=['*']),
            # Pass the task/execution roles, only to ECS tasks.
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=['iam:PassRole'],
                resources=[
                    unicorn_service.task_definition.execution_role.
                    role_arn,
                    unicorn_service.task_definition.task_role.role_arn
                ],
                conditions={
                    'StringEquals': {
                        "iam:PassedToService": "ecs-tasks.amazonaws.com"
                    }
                }),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=['iam:PassRole'],
                resources=[
                    'arn:aws:iam::*:role/aws-service-role/ecs.amazonaws.com/AWSServiceRoleForECS'
                ]),
            # Scratch bucket used by the cloud-debug tooling.
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    's3:CreateBucket', 's3:GetObject', 's3:PutObject',
                    's3:DeleteObject', 's3:ListBucket'
                ],
                resources=['arn:aws:s3:::do-not-delete-cloud-debug-*']),
            # Read-only discovery across ECS/ELB/ECR.
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    'ecs:ListClusters', 'ecs:ListServices',
                    'ecs:DescribeServices', 'ecs:ListTasks',
                    'ecs:DescribeTasks', 'ecs:DescribeTaskDefinition',
                    'elasticloadbalancing:DescribeListeners',
                    'elasticloadbalancing:DescribeRules',
                    'elasticloadbalancing:DescribeTargetGroups',
                    'ecr:GetAuthorizationToken',
                    'ecr:BatchCheckLayerAvailability',
                    'ecr:GetDownloadUrlForLayer', 'ecr:BatchGetImage'
                ],
                resources=['*']),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=['logs:CreateLogGroup', 'logs:CreateLogStream'],
                resources=['arn:aws:logs:*:*:cloud-debug*']),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=['ecs:CreateService', 'ecs:DeleteService'],
                resources=['arn:aws:ecs:*:*:service/*/cloud-debug*']),
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                actions=['ecs:RegisterTaskDefinition'],
                                resources=['*']),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    'elasticloadbalancing:ModifyListener',
                    'elasticloadbalancing:ModifyRule',
                    'elasticloadbalancing:ModifyTargetGroupAttributes'
                ],
                resources=['*']),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    'elasticloadbalancing:CreateTargetGroup',
                    'elasticloadbalancing:DeleteTargetGroup'
                ],
                resources=[
                    'arn:aws:elasticloadbalancing:*:*:targetgroup/cloud-debug*'
                ]),
            # SSM session management for exec-style debugging.
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    'ssm:StartSession', 'ssm:TerminateSession',
                    'ssm:ResumeSession', 'ssm:DescribeSessions',
                    'ssm:GetConnectionStatus'
                ],
                resources=['*']),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    'application-autoscaling:RegisterScalableTarget',
                    'application-autoscaling:DeregisterScalableTarget',
                    'application-autoscaling:DescribeScalableTargets'
                ],
                resources=['*']),
        ]))
def build_team_role(
    scope: core.Construct,
    context: "Context",
    team_name: str,
    policy_names: List[str],
    scratch_bucket: s3.IBucket,
    team_kms_key: kms.Key,
    session_timeout: core.Duration,
) -> iam.Role:
    """Create the per-team "lake" IAM role and its operational managed policy.

    The role is assumable by the data-platform services the team uses (EC2,
    Glue, SageMaker, Redshift, CodePipeline/CodeBuild, Personalize, DataBrew)
    and, via a web-identity statement added after creation, by the team's
    Kubernetes service accounts through the cluster's OIDC provider (IRSA).

    Args:
        scope: Enclosing CDK construct.
        context: Orbit deployment context (env name, toolkit bucket,
            EKS OIDC provider, optional IAM role path prefix, ...).
        team_name: Team identifier; baked into resource names and ARNs.
        policy_names: Additional managed-policy names to attach; each is
            resolved by ``process_policies`` to AWS-managed or
            customer-managed.
        scratch_bucket: Team scratch S3 bucket.
        team_kms_key: Team KMS key the role may encrypt/decrypt with.
        session_timeout: Maximum STS session duration for the role.

    Returns:
        The configured ``iam.Role``.

    Change vs previous revision: a duplicated ``"redshift:DescribeClusters"``
    entry was removed from the wide read-only statement (IAM deduplicates
    actions, so the effective policy is identical).
    """
    env_name = context.name
    partition = core.Aws.PARTITION
    account = core.Aws.ACCOUNT_ID
    region = core.Aws.REGION
    lake_role_name: str = f"orbit-{env_name}-{team_name}-{region}-role"
    # IAM path of the role; defaults to the root path when no prefix is set.
    role_prefix: str = f"/{context.role_prefix}/" if context.role_prefix else "/"

    # KMS keys the role may use: the team key plus (optionally) the scratch
    # bucket's key.
    kms_keys = [team_kms_key.key_arn]
    scratch_bucket_kms_key = IamBuilder.get_kms_key_scratch_bucket(context=context)
    if scratch_bucket_kms_key:
        kms_keys.append(scratch_bucket_kms_key)

    lake_operational_policy = iam.ManagedPolicy(
        scope=scope,
        id="lake_operational_policy",
        managed_policy_name=f"orbit-{env_name}-{team_name}-{region}-user-access",
        statements=[
            # Full S3 access to the account's SageMaker buckets and to the
            # team's own prefix of the scratch bucket.
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "s3:*",
                ],
                resources=[
                    f"arn:{partition}:s3:::sagemaker-{region}-{account}",
                    f"arn:{partition}:s3:::sagemaker-{region}-{account}/*",
                    scratch_bucket.bucket_arn,
                    f"{scratch_bucket.bucket_arn}/{team_name}/*",
                ],
            ),
            # Read/write on the toolkit-bucket areas the team needs.
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["s3:List*", "s3:Get*", "s3:Put*"],
                resources=[
                    f"arn:{partition}:s3:::{context.toolkit.s3_bucket}",
                    f"arn:{partition}:s3:::{context.toolkit.s3_bucket}/samples/*",
                    f"arn:{partition}:s3:::{context.toolkit.s3_bucket}/codeseeder/*",
                    f"arn:{partition}:s3:::{context.toolkit.s3_bucket}/teams/{team_name}/*",
                    f"arn:{partition}:s3:::{context.toolkit.s3_bucket}/helm/repositories/env/*",
                    f"arn:{partition}:s3:::{context.toolkit.s3_bucket}/helm/repositories/teams/{team_name}/*",
                ],
            ),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["ssm:Get*"],
                resources=[
                    f"arn:{partition}:ssm:{region}:{account}:parameter/orbit*",
                    f"arn:{partition}:ssm:{region}:{account}:parameter/emr_launch/",
                ],
            ),
            # Teams may only write their own "user" parameters.
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["ssm:PutParameter"],
                resources=[
                    f"arn:{partition}:ssm:{region}:{account}:parameter/orbit/{env_name}/teams/{team_name}/user*",
                ],
            ),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "redshift:GetClusterCredentials",
                    "redshift:CreateClusterUser",
                    "redshift:DeleteCluster",
                ],
                resources=[
                    f"arn:{partition}:redshift:{region}:{account}:dbuser:orbit-{env_name}-{team_name}*",
                    f"arn:{partition}:redshift:{region}:{account}:dbuser:orbit-{env_name}-{team_name}*/master",
                    f"arn:{partition}:redshift:{region}:{account}:dbuser:orbit-{env_name}-{team_name}*/defaultdb",
                    f"arn:{partition}:redshift:{region}:{account}:dbname:orbit-{env_name}-{team_name}*/defaultdb",
                    f"arn:{partition}:redshift:{region}:{account}:cluster:orbit-{env_name}-{team_name}*",
                ],
            ),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "sns:*",
                ],
                resources=[
                    f"arn:{partition}:sns:{region}:{account}:{env_name}-{team_name}*",
                ],
            ),
            # The role may pass itself (e.g. to services launching jobs on
            # the team's behalf).
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["iam:PassRole"],
                resources=[
                    f"arn:{partition}:iam::{account}:role{role_prefix}{lake_role_name}"
                ],
            ),
            # Wide read-only / interactive access across analytics services.
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "ssm:Describe*",
                    "codeartifact:Describe*",
                    "codeartifact:Get*",
                    "codeartifact:List*",
                    "codeartifact:Read*",
                    "sts:GetServiceBearerToken",
                    "s3:ListAllMyBuckets",
                    "lambda:List*",
                    "lambda:Get*",
                    "iam:List*",
                    "tag:GetResources",
                    "ecr:Get*",
                    "ecr:List*",
                    "ecr:Describe*",
                    "ecr:BatchGetImage",
                    "ecr:BatchCheckLayerAvailability",
                    "cloudwatch:PutMetricData",
                    "redshift:DescribeClusters",
                    "states:List*",
                    "states:Get*",
                    "states:Describe*",
                    "glue:Get*",
                    "glue:List*",
                    "glue:Search*",
                    "athena:*",
                    "ecs:Describe*",
                    "ecs:ListTasks",
                    "ec2:Describe*",
                    "elasticmapreduce:List*",
                    "elasticmapreduce:Get*",
                    "elasticmapreduce:Describe*",
                    "elasticmapreduce:TerminateJobFlows",
                    "elasticmapreduce:AddJobFlowSteps",
                    "sagemaker:*",
                    "databrew:*",
                    "lakeformation:GetDataAccess",
                    "fsx:Describe*",
                    "fsx:List*",
                ],
                resources=["*"],
            ),
            # Full control over the team's own user image repositories.
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "ecr:*",
                ],
                resources=[
                    f"arn:{partition}:ecr:{region}:{account}:repository/orbit-{env_name}/users/*"
                ],
            ),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt",
                    "kms:GenerateDataKey", "kms:DescribeKey"
                ],
                resources=kms_keys,
            ),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "lambda:InvokeFunction",
                ],
                resources=[
                    f"arn:{partition}:lambda:{region}:{account}:function:orbit-{env_name}-{team_name}-*",
                    f"arn:{partition}:lambda:{region}:{account}:function:orbit-{env_name}-token-validation",
                    f"arn:{partition}:lambda:{region}:{account}:function:orbit-{env_name}-eks-service-handler",
                ],
            ),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "cloudformation:DescribeStacks",
                ],
                resources=[
                    f"arn:{partition}:cloudformation:{region}:{account}:stack/orbit-{env_name}/*",
                    f"arn:{partition}:cloudformation:{region}:{account}:stack/aws-codeseeder-orbit*",
                ],
            ),
            # NOTE(review): "ssm:DescribeParameter" (singular) is not a
            # documented SSM action -- kept as-is; confirm intent.
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "ssm:GetParameters",
                    "ssm:DescribeParameters",
                    "ssm:GetParameter",
                    "ssm:DescribeParameter",
                ],
                resources=[
                    f"arn:{partition}:ssm:{region}:{account}:parameter/orbit/{env_name}/teams/{team_name}/*",
                    f"arn:{partition}:ssm:{region}:{account}:parameter/Orbit-Slack-Notifications",
                ],
            ),
            # Deletion is limited to the env changeset/manifest parameters.
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "ssm:DeleteParameter",
                    "ssm:DeleteParameters",
                ],
                resources=[
                    f"arn:{partition}:ssm:{region}:{account}:parameter/orbit/{env_name}/changeset",
                    f"arn:{partition}:ssm:{region}:{account}:parameter/orbit/{env_name}/manifest",
                ],
            ),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "ssm:DescribeParameters",
                ],
                resources=[f"arn:{partition}:ssm:{region}:{account}:*"],
            ),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["s3:Put*"],
                resources=[
                    f"arn:{partition}:s3:::{context.toolkit.s3_bucket}",
                    f"arn:{partition}:s3:::{context.toolkit.s3_bucket}/cli/remote/*",
                ],
            ),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["codebuild:StartBuild", "codebuild:BatchGetBuilds"],
                resources=[
                    f"arn:{partition}:codebuild:{region}:{account}:project/orbit-{env_name}",
                    f"arn:{partition}:codebuild:{region}:{account}:project/codeseeder-orbit",
                ],
            ),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "logs:CreateLogStream",
                    "logs:CreateLogGroup",
                    "logs:DescribeLogStreams",
                    "logs:PutLogEvents",
                ],
                resources=[
                    f"arn:{partition}:logs:{region}:{account}:log-group:/aws/codebuild/orbit-{env_name}:log-stream:*",  # noqa
                    f"arn:{partition}:logs:{region}:{account}:log-group:/aws/codebuild/codeseeder-orbit:log-stream:*",  # noqa
                    f"arn:{partition}:logs:{region}:{account}:log-group:/aws-glue-databrew/*:log-stream:*",
                ],
            ),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "logs:List*",
                    "logs:Describe*",
                    "logs:StartQuery",
                    "logs:StopQuery",
                    "logs:Get*",
                    "logs:Filter*",
                    "events:*",
                ],
                resources=[
                    f"arn:{partition}:logs:{region}:{account}:log-group:/aws/codebuild/orbit-{env_name}*:log-stream:*",  # noqa
                    f"arn:{partition}:logs:{region}:{account}:log-group:/aws/codebuild/codeseeder-orbit:log-stream:*",  # noqa
                    f"arn:{partition}:logs:{region}:{account}:log-group:/aws-glue-databrew/*:log-stream:*",
                    f"arn:{partition}:logs:{region}:{account}:log-group:/aws/sagemaker/*",
                    f"arn:{partition}:logs:{region}:{account}:log-group:/aws/sagemaker/*:log-stream:*",
                    f"arn:{partition}:logs:{region}:{account}:log-group:/aws/eks/orbit*",
                    f"arn:{partition}:events:{region}:{account}:rule/orbit-{env_name}-{team_name}-*",
                    f"arn:{partition}:logs:{region}:{account}:log-group:/aws-glue-databrew/*",
                ],
            ),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "ecr:InitiateLayerUpload",
                ],
                resources=[
                    f"arn:{partition}:ecr:{region}:{account}:repository/*",
                ],
            ),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "eks:DescribeCluster",
                ],
                resources=[
                    f"arn:{partition}:eks:{region}:{account}:cluster/orbit-{env_name}",
                ],
            ),
        ],
    )

    managed_policies = [
        lake_operational_policy,
        # For EKS
        iam.ManagedPolicy.from_aws_managed_policy_name(
            managed_policy_name="AmazonEKS_CNI_Policy"),
    ]

    # Parse the requested policy names into IAM policies: first check whether
    # each is AWS-managed or customer-managed (and correctly tagged).
    aws_policies, customer_policies = process_policies(
        policy_names=policy_names, account_id=context.account_id)
    aws_managed_user_policies = [
        iam.ManagedPolicy.from_aws_managed_policy_name(
            managed_policy_name=policy_name)
        for policy_name in aws_policies
    ]
    orbit_custom_policies = [
        iam.ManagedPolicy.from_managed_policy_name(
            scope=scope, id=policy_name, managed_policy_name=policy_name)
        for policy_name in customer_policies
    ]
    managed_policies = (managed_policies +
                        cast(List[object], aws_managed_user_policies) +
                        cast(List[object], orbit_custom_policies))

    role = iam.Role(
        scope=scope,
        id=f"lakerole-for-{env_name}-{team_name}",
        role_name=lake_role_name,
        assumed_by=cast(
            iam.IPrincipal,
            iam.CompositePrincipal(
                iam.ServicePrincipal("ec2.amazonaws.com"),
                iam.ServicePrincipal("glue.amazonaws.com"),
                iam.ServicePrincipal("sagemaker.amazonaws.com"),
                iam.ServicePrincipal("redshift.amazonaws.com"),
                iam.ServicePrincipal("codepipeline.amazonaws.com"),
                iam.ServicePrincipal("codebuild.amazonaws.com"),
                iam.ServicePrincipal("personalize.amazonaws.com"),
                iam.ServicePrincipal("databrew.amazonaws.com"),
            ),
        ),
        managed_policies=cast(Optional[Sequence[iam.IManagedPolicy]],
                              managed_policies),
        max_session_duration=session_timeout,
    )

    # Allow the team's Kubernetes service accounts to assume this role via
    # the cluster's OIDC provider (IRSA).
    if role.assume_role_policy:
        role.assume_role_policy.add_statements(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["sts:AssumeRoleWithWebIdentity"],
                principals=[
                    cast(
                        iam.IPrincipal,
                        iam.FederatedPrincipal(
                            federated=
                            f"arn:{partition}:iam::{account}:oidc-provider/{context.eks_oidc_provider}",
                            conditions={
                                "StringLike": {
                                    f"{context.eks_oidc_provider}:sub":
                                    f"system:serviceaccount:{team_name}*:*"
                                }
                            },
                        ),
                    )
                ],
            ),
        )
    return role
def __init__(self, scope: core.Construct, construct_id: str, vpc: ec2.IVpc, **kwargs) -> None:
    """Provision an EMR Studio: S3 bucket, engine/workspace security groups,
    service and user roles, a session policy, the Studio itself, and a
    session mapping for the configured user.

    Fixes over the previous revision:
    - the user role is now tagged with the EMR managed-policies tag (the
      service role was previously tagged twice while the user role was
      never tagged);
    - the session-policy document uses ``"Version": "2012-10-17"`` -- IAM
      rejects any other Version value (the old timestamp-suffixed string
      fails with MalformedPolicyDocument at deploy time).

    Args:
        scope: Parent CDK construct.
        construct_id: Logical id for this stack.
        vpc: VPC whose private subnets host the Studio workspaces.
    """
    super().__init__(scope, construct_id, **kwargs)

    # Create S3 bucket backing Studio notebooks/workspaces.
    bucket = s3.Bucket(self,
                       "StudioBucket",
                       encryption=s3.BucketEncryption.S3_MANAGED,
                       block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
                       versioned=True)

    # Create security groups. EMR Studio requires this tag on both SGs
    # when Amazon-managed policies are in use.
    eng_sg = ec2.SecurityGroup(self,
                               "EngineSecurityGroup",
                               vpc=vpc,
                               description="EMR Studio Engine",
                               allow_all_outbound=True)
    core.Tags.of(eng_sg).add("for-use-with-amazon-emr-managed-policies", "true")
    ws_sg = ec2.SecurityGroup(self,
                              "WorkspaceSecurityGroup",
                              vpc=vpc,
                              description="EMR Studio Workspace",
                              allow_all_outbound=False)
    core.Tags.of(ws_sg).add("for-use-with-amazon-emr-managed-policies", "true")
    # Workspace may reach the internet over HTTPS and the engine on 18888;
    # the engine accepts 18888 only from the workspace.
    ws_sg.add_egress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(443),
                          "allow egress on port 443")
    ws_sg.add_egress_rule(eng_sg, ec2.Port.tcp(18888),
                          "allow egress on port 18888 to eng")
    eng_sg.add_ingress_rule(ws_sg, ec2.Port.tcp(18888),
                            "allow ingress on port 18888 from ws")

    # Service role EMR assumes to manage Studio infrastructure.
    role = iam.Role(
        self,
        "StudioRole",
        assumed_by=iam.ServicePrincipal("elasticmapreduce.amazonaws.com"),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name("AmazonS3FullAccess")
        ])
    role.add_to_policy(
        iam.PolicyStatement(
            resources=["*"],
            actions=[
                "ec2:AuthorizeSecurityGroupEgress",
                "ec2:AuthorizeSecurityGroupIngress",
                "ec2:CreateSecurityGroup", "ec2:CreateTags",
                "ec2:DescribeSecurityGroups", "ec2:RevokeSecurityGroupEgress",
                "ec2:RevokeSecurityGroupIngress", "ec2:CreateNetworkInterface",
                "ec2:CreateNetworkInterfacePermission",
                "ec2:DeleteNetworkInterface",
                "ec2:DeleteNetworkInterfacePermission",
                "ec2:DescribeNetworkInterfaces",
                "ec2:ModifyNetworkInterfaceAttribute", "ec2:DescribeTags",
                "ec2:DescribeInstances", "ec2:DescribeSubnets",
                "ec2:DescribeVpcs", "elasticmapreduce:ListInstances",
                "elasticmapreduce:DescribeCluster",
                "elasticmapreduce:ListSteps"
            ],
            effect=iam.Effect.ALLOW))
    core.Tags.of(role).add("for-use-with-amazon-emr-managed-policies", "true")

    # User role assumed on behalf of Studio users.
    user_role = iam.Role(
        self,
        "StudioUserRole",
        assumed_by=iam.ServicePrincipal("elasticmapreduce.amazonaws.com"))
    # BUGFIX: tag the user role (previously `role` was tagged a second time
    # here and `user_role` was left untagged).
    core.Tags.of(user_role).add("for-use-with-amazon-emr-managed-policies", "true")
    user_role.add_to_policy(
        iam.PolicyStatement(actions=[
            "elasticmapreduce:CreateEditor", "elasticmapreduce:DescribeEditor",
            "elasticmapreduce:ListEditors", "elasticmapreduce:StartEditor",
            "elasticmapreduce:StopEditor", "elasticmapreduce:DeleteEditor",
            "elasticmapreduce:OpenEditorInConsole",
            "elasticmapreduce:AttachEditor", "elasticmapreduce:DetachEditor",
            "elasticmapreduce:CreateRepository",
            "elasticmapreduce:DescribeRepository",
            "elasticmapreduce:DeleteRepository",
            "elasticmapreduce:ListRepositories",
            "elasticmapreduce:LinkRepository",
            "elasticmapreduce:UnlinkRepository",
            "elasticmapreduce:DescribeCluster",
            "elasticmapreduce:ListInstanceGroups",
            "elasticmapreduce:ListBootstrapActions",
            "elasticmapreduce:ListClusters", "elasticmapreduce:ListSteps",
            "elasticmapreduce:CreatePersistentAppUI",
            "elasticmapreduce:DescribePersistentAppUI",
            "elasticmapreduce:GetPersistentAppUIPresignedURL",
            "secretsmanager:CreateSecret", "secretsmanager:ListSecrets",
            "secretsmanager:TagResource",
            "emr-containers:DescribeVirtualCluster",
            "emr-containers:ListVirtualClusters",
            "emr-containers:DescribeManagedEndpoint",
            "emr-containers:ListManagedEndpoints",
            "emr-containers:CreateAccessTokenForManagedEndpoint",
            "emr-containers:DescribeJobRun", "emr-containers:ListJobRuns"
        ],
                            resources=["*"],
                            effect=iam.Effect.ALLOW))
    user_role.add_to_policy(
        iam.PolicyStatement(
            resources=["*"],
            actions=[
                "servicecatalog:DescribeProduct",
                "servicecatalog:DescribeProductView",
                "servicecatalog:DescribeProvisioningParameters",
                "servicecatalog:ProvisionProduct",
                "servicecatalog:SearchProducts",
                "servicecatalog:UpdateProvisionedProduct",
                "servicecatalog:ListProvisioningArtifacts",
                "servicecatalog:DescribeRecord",
                "cloudformation:DescribeStackResources"
            ],
            effect=iam.Effect.ALLOW))
    user_role.add_to_policy(
        iam.PolicyStatement(resources=["*"],
                            actions=["elasticmapreduce:RunJobFlow"],
                            effect=iam.Effect.ALLOW))
    # Users may pass the service role and the default EMR roles to EMR.
    user_role.add_to_policy(
        iam.PolicyStatement(resources=[
            role.role_arn,
            f"arn:aws:iam::{self.account}:role/EMR_DefaultRole",
            f"arn:aws:iam::{self.account}:role/EMR_EC2_DefaultRole"
        ],
                            actions=["iam:PassRole"],
                            effect=iam.Effect.ALLOW))
    user_role.add_to_policy(
        iam.PolicyStatement(resources=["arn:aws:s3:::*"],
                            actions=[
                                "s3:ListAllMyBuckets", "s3:ListBucket",
                                "s3:GetBucketLocation"
                            ],
                            effect=iam.Effect.ALLOW))
    user_role.add_to_policy(
        iam.PolicyStatement(resources=[
            f"arn:aws:s3:::{bucket.bucket_name}/*",
            f"arn:aws:s3:::aws-logs-{self.account}-{self.region}/elasticmapreduce/*"
        ],
                            actions=["s3:GetObject"],
                            effect=iam.Effect.ALLOW))

    # Session policy attached to the user's Studio session (scopes what the
    # mapped user can do inside the Studio).
    policy_document = {
        # BUGFIX: IAM only accepts "2012-10-17" (or legacy "2008-10-17") as
        # the policy Version; the former timestamp string was rejected.
        "Version": "2012-10-17",
        "Statement": [{
            "Action": [
                "elasticmapreduce:CreateEditor",
                "elasticmapreduce:DescribeEditor",
                "elasticmapreduce:ListEditors",
                "elasticmapreduce:StartEditor", "elasticmapreduce:StopEditor",
                "elasticmapreduce:DeleteEditor",
                "elasticmapreduce:OpenEditorInConsole",
                "elasticmapreduce:AttachEditor",
                "elasticmapreduce:DetachEditor",
                "elasticmapreduce:CreateRepository",
                "elasticmapreduce:DescribeRepository",
                "elasticmapreduce:DeleteRepository",
                "elasticmapreduce:ListRepositories",
                "elasticmapreduce:LinkRepository",
                "elasticmapreduce:UnlinkRepository",
                "elasticmapreduce:DescribeCluster",
                "elasticmapreduce:ListInstanceGroups",
                "elasticmapreduce:ListBootstrapActions",
                "elasticmapreduce:ListClusters", "elasticmapreduce:ListSteps",
                "elasticmapreduce:CreatePersistentAppUI",
                "elasticmapreduce:DescribePersistentAppUI",
                "elasticmapreduce:GetPersistentAppUIPresignedURL",
                "secretsmanager:CreateSecret", "secretsmanager:ListSecrets",
                "emr-containers:DescribeVirtualCluster",
                "emr-containers:ListVirtualClusters",
                "emr-containers:DescribeManagedEndpoint",
                "emr-containers:ListManagedEndpoints",
                "emr-containers:CreateAccessTokenForManagedEndpoint",
                "emr-containers:DescribeJobRun", "emr-containers:ListJobRuns"
            ],
            "Resource": "*",
            "Effect": "Allow",
            "Sid": "AllowBasicActions"
        }, {
            "Action": [
                "servicecatalog:DescribeProduct",
                "servicecatalog:DescribeProductView",
                "servicecatalog:DescribeProvisioningParameters",
                "servicecatalog:ProvisionProduct",
                "servicecatalog:SearchProducts",
                "servicecatalog:UpdateProvisionedProduct",
                "servicecatalog:ListProvisioningArtifacts",
                "servicecatalog:DescribeRecord",
                "cloudformation:DescribeStackResources"
            ],
            "Resource": "*",
            "Effect": "Allow",
            "Sid": "AllowIntermediateActions"
        }, {
            "Action": ["elasticmapreduce:RunJobFlow"],
            "Resource": "*",
            "Effect": "Allow",
            "Sid": "AllowAdvancedActions"
        }, {
            "Action": "iam:PassRole",
            "Resource": [
                role.role_arn,
                f"arn:aws:iam::{self.account}:role/EMR_DefaultRole",
                f"arn:aws:iam::{self.account}:role/EMR_EC2_DefaultRole"
            ],
            "Effect": "Allow",
            "Sid": "PassRolePermission"
        }, {
            "Action": [
                "s3:ListAllMyBuckets", "s3:ListBucket", "s3:GetBucketLocation"
            ],
            "Resource": "arn:aws:s3:::*",
            "Effect": "Allow",
            "Sid": "S3ListPermission"
        }, {
            "Action": ["s3:GetObject"],
            "Resource": [
                f"arn:aws:s3:::{bucket.bucket_name}/*",
                f"arn:aws:s3:::aws-logs-{self.account}-{self.region}/elasticmapreduce/*"
            ],
            "Effect": "Allow",
            "Sid": "S3GetObjectPermission"
        }]
    }
    custom_policy_document = iam.PolicyDocument.from_json(policy_document)
    new_managed_policy = iam.ManagedPolicy(self,
                                           "LBControlPolicy",
                                           document=custom_policy_document)

    # Set up the Studio itself in the VPC's private subnets.
    studio = emr.CfnStudio(self,
                           "MyEmrStudio",
                           auth_mode="SSO",
                           default_s3_location=f"s3://{bucket.bucket_name}/studio/",
                           engine_security_group_id=eng_sg.security_group_id,
                           name="MyEmrEksStudio",
                           service_role=role.role_arn,
                           subnet_ids=[n.subnet_id for n in vpc.private_subnets],
                           user_role=user_role.role_arn,
                           vpc_id=vpc.vpc_id,
                           workspace_security_group_id=ws_sg.security_group_id,
                           description=None,
                           tags=None)
    core.CfnOutput(self, "StudioUrl", value=studio.attr_url)

    # Map the configured SSO user into the Studio with the session policy.
    studiosm = emr.CfnStudioSessionMapping(
        self,
        "MyStudioSM",
        identity_name=self.node.try_get_context("username"),
        identity_type="USER",
        session_policy_arn=new_managed_policy.managed_policy_arn,
        studio_id=studio.attr_studio_id)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Stack wiring a DynamoDB table, an S3-sourced Lambda, a short-retention
    log group, and two managed policies attached to the Lambda's role."""
    super().__init__(scope, id, **kwargs)

    # DynamoDB table, partition-keyed on the construct id string.
    table = _dynamodb.Table(
        self,
        "mytable",
        table_name="my-table",
        partition_key=_dynamodb.Attribute(
            name=id, type=_dynamodb.AttributeType.STRING),
        removal_policy=core.RemovalPolicy.DESTROY,
        server_side_encryption=True,
    )

    # Lambda whose deployment package lives in a pre-existing bucket.
    assets_bucket = _s3.Bucket.from_bucket_name(self, "sourceBucket",
                                                "cdk-tutorials-resources")
    handler_fn = _lambda.Function(
        self,
        "customLambdaS3",
        function_name="custom_lambda_s3",
        runtime=_lambda.Runtime.PYTHON_3_7,
        handler="custom_lambda.lambda_handler",
        code=_lambda.S3Code(bucket=assets_bucket, key="custom_lambda.zip"),
        timeout=core.Duration.seconds(3),
        reserved_concurrent_executions=1,
        environment={
            "Log_Group": "INFO",
            "TABLE_NAME": f"{table.table_name}",
            "BUCKET_NAME": f"{assets_bucket.bucket_name}",
        },
    )

    # Explicit log group so retention/removal are under stack control.
    _logs.LogGroup(
        self,
        "customLogGroupS3",
        log_group_name=f"/aws/lambda/{handler_fn.function_name}",
        removal_policy=core.RemovalPolicy.DESTROY,
        retention=_logs.RetentionDays.ONE_WEEK,
    )

    # Managed policies: bucket listing plus get/put on the table.
    list_buckets_policy = _iam.ManagedPolicy(
        self,
        "listBucketsPolicy",
        description="list s3 buckets",
        managed_policy_name="listBuckets",
        statements=[
            _iam.PolicyStatement(effect=_iam.Effect.ALLOW,
                                 actions=["s3:List*"],
                                 resources=["*"])
        ],
    )
    table_rw_policy = _iam.ManagedPolicy(
        self,
        "dbPolicy",
        description="get and put items",
        managed_policy_name="dbPutGet",
        statements=[
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=["dynamodb:GetItem", "dynamodb:PutItem"],
                resources=[f"{table.table_arn}"],
            )
        ],
    )
    handler_fn.role.add_managed_policy(list_buckets_policy)
    handler_fn.role.add_managed_policy(table_rw_policy)
def __init__(self, parent: core.App, name: str):
    """Sandbox-access stack: a restricted IAM user, its access key stored as
    a secret, and a CloudFormation deploy role scoped to the sandbox apps.

    NOTE(review): the action/resource patterns below are built from
    project-local action enums (CFNActions, IAMIAMActions, LambdaActions,
    SecretsManagerActions, KMSActions); `.name` is presumably the service's
    ARN namespace (e.g. "cloudformation") -- confirm against their
    definitions.
    """
    super().__init__(parent, name)
    # Application name fragments the sandbox user is allowed to touch.
    accessible_apps = ['GWJR', 'Dev']
    # Wildcard ARN patterns per service, one entry per accessible app.
    accessible_lamb_app_regs = [
        'arn:aws:{}:{}:{}:function:*{}*'.format(LambdaActions.name,
                                                self.region, self.account,
                                                app)
        for app in accessible_apps
    ]
    accessible_cfn_app_regs = [
        'arn:aws:{}:{}:{}:*{}*'.format(CFNActions.name, self.region,
                                       self.account, app)
        for app in accessible_apps
    ]
    # IAM ARNs have no region component, hence the double colon.
    accessible_iam_app_regs = [
        'arn:aws:{}::{}:role/*{}*'.format(IAMIAMActions.name, self.account,
                                          app) for app in accessible_apps
    ]
    accessible_app_regs = accessible_cfn_app_regs + accessible_iam_app_regs + accessible_lamb_app_regs
    # Full CFN/IAM access plus Lambda invoke, but only on resources whose
    # names match one of the accessible apps.
    self.app_access_statement = iam.PolicyStatement(
        actions=[
            CFNActions.FULL_ACCESS, IAMIAMActions.FULL_ACCESS,
            LambdaActions.INVOKE_FUNCTION
        ],
        resources=accessible_app_regs)
    # Sandbox user: broad read-only access, full S3, plus the scoped
    # statement above as an attached inline managed policy.
    self.sb_user = iam.User(
        self,
        'SBUser',
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AWSLambdaReadOnlyAccess'),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'IAMReadOnlyAccess'),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AWSCloudFormationReadOnlyAccess'),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonRoute53ReadOnlyAccess'),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonS3FullAccess'),
            iam.ManagedPolicy(self,
                              'SBUserPolicy',
                              statements=[self.app_access_statement])
        ])
    # Long-lived access key for the sandbox user; the secret part is only
    # available through the CfnAccessKey attribute below.
    self.sb_user_access_key = iam.CfnAccessKey(
        self,
        'SBUserAccessKey',
        user_name=self.sb_user.user_name,
        status='Active')
    # Serialize the credentials so they can be stored as a secret.
    creds = json.dumps({
        'AWS_ACCESS_KEY_ID':
        self.sb_user_access_key.ref,
        'AWS_SECRET_ACCESS_KEY':
        self.sb_user_access_key.attr_secret_access_key
    })
    # Project-local secret wrapper + secrets stack granting the user read
    # access to its own credentials.
    self.aws_cred_secret = PreDefinedSecret('SBCreds', creds)
    self.secret_stack = Secrets(self, "Secrets",
                                authorized_users=[self.sb_user],
                                predefined_secrets=[self.aws_cred_secret])
    # Deploy role assumed by CloudFormation; limited to resources named
    # after this stack, the credentials secret, or an accessible app.
    self.deploy_role = iam.Role(
        self,
        'DepRol',
        assumed_by=pyiam.CFN_PRINCIPAL,
        inline_policies={
            'Pol': iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    actions=[
                        CFNActions.FULL_ACCESS, IAMIAMActions.FULL_ACCESS,
                        SecretsManagerActions.FULL_ACCESS,
                        KMSActions.FULL_ACCESS
                    ],
                    resources=[
                        'arn:aws:{}:{}:{}:*'.format(
                            KMSActions.name, self.region, self.account),
                        'arn:aws:{}:{}:{}:*{}*'.format(
                            CFNActions.name, self.region, self.account,
                            self.stack_name),
                        'arn:aws:{}::{}:role/*{}*'.
                        format(IAMIAMActions.name, self.account, self.
                               stack_name),
                        'arn:aws:{}::{}:user/*{}*'.
                        format(IAMIAMActions.name, self.account, self.
                               stack_name),
                        'arn:aws:{}::{}:policy/*{}*'.
                        format(IAMIAMActions.name, self.account, self.
                               stack_name),
                        'arn:aws:{}:{}:{}:*{}*'.format(
                            SecretsManagerActions.name, self.region,
                            self.account, self.stack_name),
                        'arn:aws:{}:{}:{}:*{}*'.format(
                            SecretsManagerActions.name, self.region,
                            self.account,
                            self.aws_cred_secret.secret_name)
                    ] + [
                        'arn:aws:{}::{}:role/*{}*'.format(
                            IAMIAMActions.name, self.account,
                            accessible_app)
                        for accessible_app in accessible_apps
                    ]),
                # KMS needs a blanket grant (keys are referenced by id,
                # not by a name the wildcard patterns above could match).
                iam.PolicyStatement(actions=[KMSActions.FULL_ACCESS],
                                    resources=['*'])
            ])
        })
    # Surface the deploy role ARN and sandbox user name as stack outputs.
    core.CfnOutput(self, 'DeployRoleArn', value=self.deploy_role.role_arn)
    core.CfnOutput(self, 'SBUserName', value=self.sb_user.user_name)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """IN-CLT-01: watch CloudTrail-related events and respond with a Lambda
    that can notify via SNS/Slack and apply IAM countermeasures."""
    super().__init__(scope, id, **kwargs)

    NOTIFY_EMAIL = self.node.try_get_context("notify_email")
    SLACK_WEBHOOK_URL = self.node.try_get_context("webhook_url")
    WHITE_LIST_GROUP = self.node.try_get_context("white_list_group")
    # Guard clause: all three context values are mandatory.
    if not (NOTIFY_EMAIL and SLACK_WEBHOOK_URL and WHITE_LIST_GROUP):
        logger.error(
            f"Required context variables for {id} were not provided!")
        return

    # 1. Response Lambda, packaged from the local stack directory.
    lambda_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks",
                                   "in_clt_01")
    response_lambda = _lambda.Function(
        self,
        "InClt01ResponseFunction",
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler="clUnauthAccessResponse.lambda_handler",
        code=_lambda.Code.from_asset(lambda_dir_path),
        function_name="InClt01ResponseFunction",
        environment={
            "webhook_url": SLACK_WEBHOOK_URL,
            "white_list_group": WHITE_LIST_GROUP,
        })

    # 2. EventBridge rule tracking CloudTrail events.
    rule = events.Rule(
        self,
        "cdkRule",
        description=
        'Rule created by CDK for monitoring CloudTrail access',
        enabled=True,
        rule_name="CltAccessRule",
        event_pattern={"source": ["aws.cloudtrail"]})

    # 3. Grant the Lambda IAM control plus Organizations read access.
    response_lambda.add_to_role_policy(
        iam.PolicyStatement(
            actions=[
                "iam:*", "organizations:DescribeAccount",
                "organizations:DescribeOrganization",
                "organizations:DescribeOrganizationalUnit",
                "organizations:DescribePolicy",
                "organizations:ListChildren",
                "organizations:ListParents",
                "organizations:ListPoliciesForTarget",
                "organizations:ListRoots", "organizations:ListPolicies",
                "organizations:ListTargetsForPolicy"
            ],
            effect=iam.Effect.ALLOW,
            resources=["*"],
        ))
    # 4. Permission to publish SNS notifications.
    response_lambda.add_to_role_policy(
        iam.PolicyStatement(
            actions=["sns:*"],
            effect=iam.Effect.ALLOW,
            resources=["*"],
        ))

    # 5. Invoke the Lambda whenever the rule matches.
    rule.add_target(event_target.LambdaFunction(response_lambda))

    # 6. SNS topic + email subscription for human notification.
    topic = sns.Topic(self, "CLTAccessCDK", topic_name="CLTAccessCDK")
    topic.add_subscription(subs.EmailSubscription(NOTIFY_EMAIL))

    # 7. Deny-all-CloudTrail policy the responder can attach to offenders.
    iam.ManagedPolicy(self,
                      "InCLT01DenyPolicy",
                      managed_policy_name="CltDenyAccess",
                      statements=[
                          iam.PolicyStatement(effect=iam.Effect.DENY,
                                              actions=["cloudtrail:*"],
                                              resources=["*"])
                      ])

    # 8. IAM group used by the response workflow.
    iam.Group(self, "cltAccessGroup", group_name="cltAccessGroup")
def provision(self, name: str, cluster_name: str,
              s3_policy: iam.ManagedPolicy, r53_zone_ids: List[str]):
    """Build the EKS node-group role.

    Attaches the autoscaler policy, a deny-by-tag ECR restriction, the
    caller-supplied S3 policy, the standard AWS worker-node policies and,
    when hosted-zone ids are given, a Route53 record-management policy.
    """
    # Scale in/out is only permitted on ASGs tagged for this cluster.
    scoped_scaling = iam.PolicyStatement(
        actions=[
            "autoscaling:DescribeAutoScalingInstances",
            "autoscaling:SetDesiredCapacity",
            "autoscaling:TerminateInstanceInAutoScalingGroup",
        ],
        resources=["*"],
        conditions={
            "StringEquals": {
                "autoscaling:ResourceTag/eks:cluster-name": cluster_name
            }
        },
    )
    autoscaler_policy = iam.ManagedPolicy(
        self.scope,
        "autoscaler",
        managed_policy_name=f"{name}-autoscaler",
        statements=[
            iam.PolicyStatement(
                actions=[
                    "autoscaling:DescribeAutoScalingGroups",
                    "autoscaling:DescribeLaunchConfigurations",
                    "autoscaling:DescribeTags",
                    "ec2:DescribeLaunchTemplateVersions",
                ],
                resources=["*"],
            ),
            scoped_scaling,
        ],
    )

    # Route53 policy is only created when zone ids were supplied.
    route53_policy = None
    if r53_zone_ids:
        route53_policy = iam.ManagedPolicy(
            self.scope,
            "route53",
            managed_policy_name=f"{name}-route53",
            statements=[
                iam.PolicyStatement(
                    actions=["route53:ListHostedZones"],
                    resources=["*"],
                ),
                iam.PolicyStatement(
                    actions=[
                        "route53:ChangeResourceRecordSets",
                        "route53:ListResourceRecordSets",
                    ],
                    resources=[
                        f"arn:aws:route53:::hostedzone/{zone_id}"
                        for zone_id in r53_zone_ids
                    ],
                ),
            ],
        )

    # Deny every ECR action on repositories not tagged for this deploy.
    ecr_policy = iam.ManagedPolicy(
        self.scope,
        "DominoEcrReadOnly",
        managed_policy_name=f"{name}-DominoEcrRestricted",
        statements=[
            iam.PolicyStatement(
                effect=iam.Effect.DENY,
                actions=["ecr:*"],
                conditions={
                    "StringNotEqualsIfExists": {
                        "ecr:ResourceTag/domino-deploy-id": name
                    }
                },
                resources=[f"arn:aws:ecr:*:{self.scope.account}:*"],
            ),
        ],
    )

    attached_policies = [
        s3_policy,
        ecr_policy,
        autoscaler_policy,
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AmazonEKSWorkerNodePolicy'),
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AmazonEC2ContainerRegistryReadOnly'),
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AmazonEKS_CNI_Policy'),
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AmazonSSMManagedInstanceCore'),
    ]
    if route53_policy is not None:
        attached_policies.append(route53_policy)

    return iam.Role(
        self.scope,
        f'{name}-NG',
        role_name=f"{name}-NG",
        assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'),
        managed_policies=attached_policies,
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """IN-CLW-01: watch CloudWatch and CloudWatch Logs events, respond via a
    Lambda, and notify a subscriber by email."""
    super().__init__(scope, id, **kwargs)

    NOTIFY_EMAIL = self.node.try_get_context("notify_email")
    SLACK_WEBHOOK_URL = self.node.try_get_context("webhook_url")
    WHITE_LIST_GROUP = self.node.try_get_context("white_list_group")
    # Guard clause: every context value is required.
    if not (NOTIFY_EMAIL and SLACK_WEBHOOK_URL and WHITE_LIST_GROUP):
        logger.error(f"Required context variables for {id} were not provided!")
        return

    # One rule per event source of interest.
    logs_rule = events.Rule(self,
                            "cdkRule1_clw",
                            description='Rule created by CLW CDK',
                            enabled=True,
                            rule_name="rule1bycdk_clw",
                            event_pattern={"source": ["aws.logs"]})
    cw_rule = events.Rule(self,
                          "cdkRule2_clw",
                          description='Rule created by CLW CDK',
                          enabled=True,
                          rule_name="rule2bycdk_clw",
                          event_pattern={"source": ["aws.cloudwatch"]})

    # Response Lambda packaged from the local stack directory.
    handler_dir = os.path.join(os.getcwd(), "ir_cdk_stacks", "in_clw_01")
    response_lambda = _lambda.Function(
        self,
        "InClw01ResponseFunction",
        runtime=_lambda.Runtime.PYTHON_3_7,
        handler="clwUnauthAccessResponse.lambda_handler",
        code=_lambda.Code.from_asset(handler_dir),
        function_name="InClw01ResponseFunction",
        environment={
            "webhook_url": SLACK_WEBHOOK_URL,
            "white_list_group": WHITE_LIST_GROUP,
        })
    # NOTE(review): blanket allow-all grant, kept as-is from the original
    # design -- consider narrowing.
    response_lambda.add_to_role_policy(
        iam.PolicyStatement(
            actions=["*"],
            effect=iam.Effect.ALLOW,
            resources=["*"],
        ))
    for watched_rule in (logs_rule, cw_rule):
        watched_rule.add_target(event_target.LambdaFunction(response_lambda))

    # SNS topic + email subscription for human notification.
    topic = sns.Topic(self, "CDKCLWAccess", topic_name="CDKCLWAccess")
    topic.add_subscription(subs.EmailSubscription(NOTIFY_EMAIL))

    # Deny policies the responder can attach to offending principals.
    iam.ManagedPolicy(self,
                      "InCLW01DenyPolicy1",
                      managed_policy_name="ClWDenyAccess1",
                      statements=[
                          iam.PolicyStatement(effect=iam.Effect.DENY,
                                              actions=["logs:*"],
                                              resources=["*"])
                      ])
    iam.ManagedPolicy(self,
                      "InCLW01DenyPolicy2",
                      managed_policy_name="ClWDenyAccess2",
                      statements=[
                          iam.PolicyStatement(effect=iam.Effect.DENY,
                                              actions=["cloudwatch:*"],
                                              resources=["*"])
                      ])

    # IAM group used by the response workflow.
    iam.Group(self, "clwAccessGroup", group_name="clwAccessGroup")
def __init__(self, scope: core.Construct, id: str, props: dict, **kwargs):
    """Trading-platform stack: ECR repos, the Fargate task-execution role
    and policy, SSM parameters for external-API credentials, and two
    scheduled Fargate tasks (recommendation + portfolio manager).

    Relies on helper methods defined on this class (make_ecr_repo,
    make_ssm_parameter, make_fargate_scheduled_task) and on the project's
    `util` module; their exact behavior is defined elsewhere in the file.
    """
    super().__init__(scope, id, **kwargs)
    # Copy so downstream stacks receive props plus this stack's additions.
    self.output_props = props.copy()
    self.props = props
    # "region:account" ARN fragment -- presumably; confirm against
    # util.get_region_acct_prefix (resources below embed it where an ARN
    # expects "<region>:<account>").
    r_a_prefix = util.get_region_acct_prefix(kwargs['env'])
    self.APPLICATION_PREFIX = self.props['APPLICATION_PREFIX']
    ''' ECR Repo '''
    self.repo_recommendation_service = self.make_ecr_repo(
        "recommendation-service", "Recommendation Service")
    self.rep_portfolio_manager = self.make_ecr_repo(
        "portfolio-manager-service", "Portfolio Manager Service")
    ''' IAM Role and Policy used by Fargate to execute task '''
    policy_name = "policy-%s-ecs-task-execution" % self.APPLICATION_PREFIX
    docker_exec_policy = iam.ManagedPolicy(self, policy_name)
    # Networking / load-balancing / service-discovery actions are not
    # resource-scopable here, hence the "*" resource.
    docker_exec_policy.add_statements(
        iam.PolicyStatement(actions=[
            "ec2:AttachNetworkInterface", "ec2:CreateNetworkInterface",
            "ec2:CreateNetworkInterfacePermission",
            "ec2:DeleteNetworkInterface",
            "ec2:DeleteNetworkInterfacePermission", "ec2:Describe*",
            "ec2:DetachNetworkInterface",
            "elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
            "elasticloadbalancing:DeregisterTargets",
            "elasticloadbalancing:Describe*",
            "elasticloadbalancing:RegisterInstancesWithLoadBalancer",
            "elasticloadbalancing:RegisterTargets",
            "route53:ChangeResourceRecordSets",
            "route53:CreateHealthCheck", "route53:DeleteHealthCheck",
            "route53:Get*", "route53:List*", "route53:UpdateHealthCheck",
            "servicediscovery:DeregisterInstance", "servicediscovery:Get*",
            "servicediscovery:List*", "servicediscovery:RegisterInstance",
            "servicediscovery:UpdateInstanceCustomHealthStatus"
        ],
                            conditions=None,
                            effect=iam.Effect.ALLOW,
                            resources=["*"]))
    # Alarm management, scoped to this account/region's alarms.
    docker_exec_policy.add_statements(
        iam.PolicyStatement(
            actions=[
                "cloudwatch:DeleteAlarms", "cloudwatch:DescribeAlarms",
                "cloudwatch:PutMetricAlarm"
            ],
            conditions=None,
            effect=iam.Effect.ALLOW,
            resources=["arn:aws:cloudwatch:%s:alarm:*" % r_a_prefix]))
    # Tagging of the task ENIs only.
    docker_exec_policy.add_statements(
        iam.PolicyStatement(
            actions=["ec2:CreateTags"],
            conditions=None,
            effect=iam.Effect.ALLOW,
            resources=["arn:aws:ec2:%s:network-interface/*" % r_a_prefix]))
    # Log-group level permissions for the /aws/ecs/* groups...
    docker_exec_policy.add_statements(
        iam.PolicyStatement(actions=[
            "logs:CreateLogGroup", "logs:DescribeLogGroups",
            "logs:PutRetentionPolicy"
        ],
                            conditions=None,
                            effect=iam.Effect.ALLOW,
                            resources=[
                                "arn:aws:logs:%s:log-group:/aws/ecs/*" %
                                r_a_prefix
                            ]))
    # ...and log-stream level permissions within them.
    docker_exec_policy.add_statements(
        iam.PolicyStatement(
            actions=[
                "logs:CreateLogStream", "logs:DescribeLogStreams",
                "logs:PutLogEvents"
            ],
            conditions=None,
            effect=iam.Effect.ALLOW,
            resources=[
                "arn:aws:logs:%s:log-group:/aws/ecs/*:log-stream:*" %
                r_a_prefix
            ]))
    exec_role_name = "role-%s-ecs-task-execution" % self.APPLICATION_PREFIX
    # Execution role assumed by the ECS agent when launching tasks.
    self.ecs_task_exec_role = iam.Role(
        self,
        exec_role_name,
        assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
        description="%s Execution role assumed by ECS" %
        self.APPLICATION_PREFIX,
        managed_policies=[docker_exec_policy],
        role_name=exec_role_name)
    util.tag_resource(
        self.ecs_task_exec_role, exec_role_name,
        "IAM Role and Policy used by Fargate to execute task")
    ''' Parameter Store variables: Intrinio API Key TDAMeritrade Client ID and Refresh Token '''
    # Placeholders only -- real values are set out-of-band in SSM.
    intrinio_api_key_name = 'INTRINIO_API_KEY'
    self.intrinio_api_key_param = self.make_ssm_parameter(
        intrinio_api_key_name, 'put_api_key_here',
        'API Key used to access Intrinio financial data')
    td_ameritrade_account_id_name = 'TDAMERITRADE_ACCOUNT_ID'
    self.tdameritrade_account_id = self.make_ssm_parameter(
        td_ameritrade_account_id_name, 'put_account_id_here',
        'The TDAmeritrade Account ID')
    td_ameritrade_client_id_name = 'TDAMERITRADE_CLIENT_ID'
    self.tdameritrade_client_id = self.make_ssm_parameter(
        td_ameritrade_client_id_name, 'put_client_id_here',
        'The Client Key used to authenticate the application')
    td_ameritrade_refresh_token_name = 'TDAMERITRADE_REFRESH_TOKEN'
    self.tdameritrade_refresh_token = self.make_ssm_parameter(
        td_ameritrade_refresh_token_name, 'put_refresh_token_here',
        'OAuth refresh token used to generate temporary Access Keys')
    ''' Fargate Tasks: 1) Recommendation Service 2) Portfolio Manager '''
    # Both run on weekday cron schedules; secrets are injected from the
    # SSM parameters created above.
    self.make_fargate_scheduled_task(
        "recommendation-service",
        "Recommendation service task definition",
        self.repo_recommendation_service, "/ecs/recommendation-service", [
            '-ticker_file', 'djia30.txt', '-output_size', '3', 'production',
            '-app_namespace', self.APPLICATION_PREFIX
        ], {
            intrinio_api_key_name:
            ecs.Secret.from_ssm_parameter(self.intrinio_api_key_param)
        }, "Recommendation service monthly scheduled task",
        "cron(0 10 ? * MON-FRI *)")
    self.make_fargate_scheduled_task(
        "portfolio-manager-service",
        "Portfolio Manager service task definition",
        self.rep_portfolio_manager, "/ecs/portfolio-manager", [
            '-app_namespace', self.APPLICATION_PREFIX, "-portfolio_size",
            "3"
        ], {
            intrinio_api_key_name:
            ecs.Secret.from_ssm_parameter(self.intrinio_api_key_param),
            td_ameritrade_account_id_name:
            ecs.Secret.from_ssm_parameter(self.tdameritrade_account_id),
            td_ameritrade_client_id_name:
            ecs.Secret.from_ssm_parameter(self.tdameritrade_client_id),
            td_ameritrade_refresh_token_name:
            ecs.Secret.from_ssm_parameter(self.tdameritrade_refresh_token)
        }, "Portfolio Manager daily task", "cron(0 15 ? * MON-FRI *)")
    ''' Outputs '''
    # Expose the repos to downstream stacks via the shared props dict.
    self.output_props[
        'repo_recommendation_service'] = self.repo_recommendation_service
    self.output_props[
        'repo_portfolio_manager'] = self.rep_portfolio_manager
def create_service_catalog_template(self, user_role_arn: str):
    """Publish an EMR cluster template via AWS Service Catalog.

    Creates a launch role and policy for Service Catalog, a portfolio,
    a CloudFormation product wrapping a matplotlib-enabled EMR cluster
    template, and wires the portfolio to the product, the end-user
    principal, and the launch-role constraint.

    :param user_role_arn: ARN of the IAM principal (the EMR Studio user)
        allowed to launch products from the portfolio.
    """
    # Role Service Catalog assumes when provisioning the template.
    launch_role = iam.Role(
        self,
        "EMRStudioClusterTemplateLaunchRole",
        assumed_by=iam.ServicePrincipal("servicecatalog.amazonaws.com"),
    )

    # Attach the permissions the launch role needs: drive CloudFormation,
    # run/terminate EMR clusters, and pass the default EMR service roles.
    # (The construct registers itself on the role; no local binding needed.)
    iam.ManagedPolicy(
        self,
        "EMRStudioClusterTemplatePolicy",
        roles=[launch_role],
        document=iam.PolicyDocument(statements=[
            iam.PolicyStatement(
                actions=[
                    "cloudformation:CreateStack",
                    "cloudformation:DeleteStack",
                    "cloudformation:DescribeStackEvents",
                    "cloudformation:DescribeStacks",
                    "cloudformation:GetTemplateSummary",
                    "cloudformation:SetStackPolicy",
                    "cloudformation:ValidateTemplate",
                    "cloudformation:UpdateStack",
                    "elasticmapreduce:RunJobFlow",
                    "elasticmapreduce:DescribeCluster",
                    "elasticmapreduce:TerminateJobFlows",
                    "servicecatalog:*",
                    "s3:GetObject",
                ],
                resources=["*"],
            ),
            iam.PolicyStatement(
                actions=["iam:PassRole"],
                resources=[
                    f"arn:aws:iam::{cdk.Aws.ACCOUNT_ID}:role/EMR_DefaultRole",
                    f"arn:aws:iam::{cdk.Aws.ACCOUNT_ID}:role/EMR_EC2_DefaultRole",
                ],
            ),
        ]),
    )

    # Portfolio that groups the cluster-template products.
    portfolio = servicecatalog.CfnPortfolio(
        self,
        "EMRStudioClusterTemplatePortfolio",
        display_name="ClusterTemplatePortfolio",
        provider_name="emr-studio-examples",
    )

    # Grant the end-user principal access to the portfolio.
    principal_association = servicecatalog.CfnPortfolioPrincipalAssociation(
        self,
        "EMRStudioClusterTemplatePortfolioPrincipalAssociationForEndUser",
        principal_arn=user_role_arn,
        portfolio_id=portfolio.ref,
        principal_type="IAM",
    )
    principal_association.node.add_dependency(portfolio)

    # Product whose provisioning artifact is a hosted CloudFormation
    # template for an EMR cluster with matplotlib pre-installed.
    matplotlib_product = servicecatalog.CfnCloudFormationProduct(
        self,
        "EMRStudioBasemapProduct",
        name="matplotlib-cluster",
        description=
        "An emr-6.2.0 cluster that has matplotlib pre-installed.",
        owner="emr-studio-examples",
        provisioning_artifact_parameters=[
            servicecatalog.CfnCloudFormationProduct.
            ProvisioningArtifactPropertiesProperty(
                name="Matplotlib Cluster Template",
                description="Matplotlib Cluster Template",
                info={
                    "LoadTemplateFromURL":
                    "https://gist.githubusercontent.com/dacort/14466352d025c7fcdeafda438de1384b/raw/17a2e8980b5629c390155a65116cec9f056bda31/matplotlib-cluster.yaml"
                },
            )
        ],
    )

    # Put the product in the portfolio; both must exist first.
    product_association = servicecatalog.CfnPortfolioProductAssociation(
        self,
        "EMRStudioBasemapProductPortfolioMapping",
        portfolio_id=portfolio.ref,
        product_id=matplotlib_product.ref,
    )
    product_association.node.add_dependency(portfolio)
    product_association.node.add_dependency(matplotlib_product)

    # Force launches of this product to use the dedicated launch role.
    launch_constraint = servicecatalog.CfnLaunchRoleConstraint(
        self,
        "EMRStudioPortfolioLaunchRoleConstraint",
        portfolio_id=portfolio.ref,
        product_id=matplotlib_product.ref,
        role_arn=launch_role.role_arn,
    )
    launch_constraint.node.add_dependency(portfolio)
    launch_constraint.node.add_dependency(matplotlib_product)
def __init__(self, scope, id, **kwargs) -> None:
    """Provision the complete SorterBot AWS environment in one stack.

    Creates, in order: a random resource-name suffix (stored in SSM and on
    disk for teardown scripts), a two-AZ public VPC with security groups,
    an IAM role and secrets policy for the ECS task, three S3 buckets, a
    control-panel EC2 instance, a publicly reachable Postgres RDS instance,
    an ECR repository, and an ECS Fargate cluster/service.
    """
    super().__init__(scope, id, **kwargs)

    # Random lowercase suffix for resource names that must be globally
    # unique (S3) or recreated between deploys (EC2 control panel).
    resource_suffix = ''.join(
        random.choice(string.ascii_lowercase) for i in range(8))
    # Save it as SSM parameter so the application can read it at runtime.
    ssm.StringParameter(self,
                        "RESOURCE_SUFFIX",
                        string_value=resource_suffix,
                        parameter_name="RESOURCE_SUFFIX")

    # ====================================== VPC ======================================
    # Two public subnets across two AZs; no NAT gateways (everything runs
    # in public subnets with public IPs to stay in the Free Tier).
    vpc = ec2.Vpc(self,
                  "sorterbot-vpc",
                  cidr="10.0.0.0/16",
                  enable_dns_support=True,
                  enable_dns_hostnames=True,
                  max_azs=2,
                  nat_gateways=0,
                  subnet_configuration=[
                      {
                          "subnetType": ec2.SubnetType.PUBLIC,
                          "name": "sorterbot-public-subnet-a",
                          "cidrMask": 24,
                      },
                      {
                          "subnetType": ec2.SubnetType.PUBLIC,
                          "name": "sorterbot-public-subnet-b",
                          "cidrMask": 24,
                      },
                  ])

    # Security groups: sg_vpc allows all traffic between members (self-
    # referencing ingress rule); sg_control opens SSH, Postgres and HTTP
    # to the world for the control-panel instance.
    sg_vpc = ec2.SecurityGroup(self,
                               "sorterbot-vpc-sg",
                               vpc=vpc,
                               allow_all_outbound=True,
                               security_group_name="sorterbot-vpc-sg")
    sg_vpc.add_ingress_rule(sg_vpc, ec2.Port.all_traffic())

    sg_control = ec2.SecurityGroup(
        self,
        "sorterbot-control-sg",
        vpc=vpc,
        allow_all_outbound=True,
        security_group_name="sorterbot-control-sg")
    sg_control.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(22))
    sg_control.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(5432))
    sg_control.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(80))

    # ====================================== IAM ======================================
    # L1 (Cfn) role assumed by ECS tasks; grants S3 plus the standard ECS
    # task-execution policy via managed policy ARNs.
    cloud_role = iam.CfnRole(
        self,
        "SorterBotCloudRole",
        role_name="SorterBotCloudRole",
        assume_role_policy_document={
            "Version": "2012-10-17",
            "Statement": [{
                "Sid": "",
                "Effect": "Allow",
                "Principal": {
                    "Service": "ecs-tasks.amazonaws.com"
                },
                "Action": "sts:AssumeRole"
            }]
        },
        managed_policy_arns=[
            "arn:aws:iam::aws:policy/AmazonS3FullAccess",
            "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy",
        ])

    # Policy letting the task read SSM parameters and decrypt secrets.
    # NOTE(review): cloud_role is an iam.CfnRole (L1 construct) while
    # ManagedPolicy.roles expects IRole (L2) objects — confirm this
    # synthesizes/attaches as intended for the CDK version in use.
    iam.ManagedPolicy(self,
                      "SorterBotSecretsForECSPolicy",
                      managed_policy_name="SorterBotSecretsForECSPolicy",
                      roles=[cloud_role],
                      statements=[
                          iam.PolicyStatement(
                              resources=["*"],
                              actions=[
                                  "ssm:GetParameter", "ssm:GetParameters",
                                  "secretsmanager:GetSecretValue",
                                  "kms:Decrypt"
                              ])
                      ])

    # ====================================== S3 ======================================
    # Data, model-weights, and static-assets buckets; all deleted with the
    # stack. The static bucket allows cross-origin GETs for the web UI.
    s3.Bucket(self,
              f"sorterbot-{resource_suffix}",
              bucket_name=f"sorterbot-{resource_suffix}",
              removal_policy=core.RemovalPolicy.DESTROY)
    s3.Bucket(self,
              f"sorterbot-weights-{resource_suffix}",
              bucket_name=f"sorterbot-weights-{resource_suffix}",
              removal_policy=core.RemovalPolicy.DESTROY)
    s3.Bucket(self,
              f"sorterbot-static-{resource_suffix}",
              bucket_name=f"sorterbot-static-{resource_suffix}",
              removal_policy=core.RemovalPolicy.DESTROY,
              cors=[
                  s3.CorsRule(allowed_methods=[s3.HttpMethods.GET],
                              allowed_origins=["*"],
                              allowed_headers=["*"])
              ])

    # ====================================== EC2 ======================================
    # Control-panel instance (t2.micro, Amazon Linux 2, public subnet).
    control_panel_instance = ec2.Instance(
        self,
        f"sorterbot-control-panel-{resource_suffix}",
        instance_name=
        f"sorterbot-control-panel-{resource_suffix}",  # Since deleted instances stay around for a while in terminated state, random suffix is needed to prevent errors when destroying stack  # noqa: E501
        instance_type=ec2.InstanceType("t2.micro"),
        machine_image=ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
        vpc=vpc,
        key_name="sorterbot",
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
        security_group=sg_control)
    # Instance role needs to inspect ENIs, read config from SSM, and
    # manage ECS tasks and S3 objects.
    control_panel_instance.add_to_role_policy(
        iam.PolicyStatement(resources=["*"],
                            actions=[
                                "ec2:DescribeNetworkInterfaces",
                                "ssm:GetParameter", "ecs:*", "s3:*"
                            ]))

    # ====================================== RDS ======================================
    # Connection details; the password comes from a SecureString SSM
    # parameter so it never appears in the template.
    master_username = "******"
    master_user_password = core.SecretValue.ssm_secure("PG_PASS",
                                                       version="1")
    port = 5432

    # Postgres instance sized for the Free Tier, reachable from the
    # internet (credentials are the only barrier).
    database = rds.DatabaseInstance(
        self,
        "sorterbot_postgres",
        allocated_storage=10,
        backup_retention=core.Duration.days(
            0
        ),  # Don't save backups since storing them is not covered by the Free Tier
        database_name="sorterbot",
        delete_automated_backups=True,
        deletion_protection=False,
        engine=rds.DatabaseInstanceEngine.POSTGRES,
        engine_version="11",
        instance_class=ec2.InstanceType("t2.micro"),  # Stay in Free Tier
        instance_identifier="sorterbot-postgres",
        master_username=master_username,
        master_user_password=master_user_password,
        port=port,
        storage_type=rds.StorageType.GP2,
        vpc=vpc,
        vpc_placement=ec2.SubnetSelection(
            subnet_type=ec2.SubnetType.PUBLIC
        ),  # Make DB publicly accessible (with credentials)
        removal_policy=core.RemovalPolicy.DESTROY)
    # Add ingress rule to allow external connections
    database.connections.allow_default_port_from_any_ipv4()

    # ====================================== ECR ======================================
    # Repository for the SorterBot Cloud Docker images.
    ecr.Repository(self,
                   "sorterbot-ecr",
                   repository_name="sorterbot-ecr",
                   removal_policy=core.RemovalPolicy.DESTROY)

    # ====================================== ECS ======================================
    # Fargate cluster/service; desired_count=0 so tasks are started on
    # demand (presumably by the control panel — verify against caller).
    ecs_cluster = ecs.Cluster(self,
                              "sorterbot-ecs-cluster",
                              vpc=vpc,
                              cluster_name="sorterbot-ecs-cluster")

    task_definition = ecs.FargateTaskDefinition(
        self, "sorterbot-fargate-service", cpu=512, memory_limit_mib=4096)

    task_definition.add_container(
        "sorterbot-cloud-container",
        image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample"))

    ecs.FargateService(self,
                       "sorterbot-ecs-service",
                       cluster=ecs_cluster,
                       task_definition=task_definition,
                       assign_public_ip=True,
                       service_name="sorterbot-ecs-service",
                       desired_count=0,
                       security_group=sg_vpc)

    # Save resource suffix to disk to be used when destroying
    with open(
            Path(__file__).parents[1].joinpath("scripts", "variables",
                                               "RESOURCE_SUFFIX"),
            "w") as outfile:
        outfile.write(resource_suffix)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Build the web-app ECS environment.

    Creates a small VPC, an ECS cluster, a shared task/execution IAM role,
    a Fargate task running the main web container plus a Datadog sidecar,
    and an internet-facing ALB terminating TLS on 443 with an HTTP→HTTPS
    redirect on 80.
    """
    super().__init__(scope, id, **kwargs)

    # Load the main container's environment variables into EcsStack.commands.
    EcsStack.readConfig(0)

    vpc = ec.Vpc(
        self,
        "Main",
        cidr="11.0.0.0/26",
        max_azs=2,
        nat_gateways=1,
        subnet_configuration=[
            ec.SubnetConfiguration(name="public",
                                   cidr_mask=28,
                                   subnet_type=ec.SubnetType.PUBLIC),
            ec.SubnetConfiguration(name="private",
                                   cidr_mask=28,
                                   subnet_type=ec.SubnetType.PRIVATE)
        ])

    cluster = ecs.Cluster(self, "TestingCluster", vpc=vpc)

    # Role assumed by both ECS tasks and EC2; used as task AND execution
    # role below. Broad AWS-managed policies plus an inline policy for
    # decrypting secrets.
    taskRole = iam.Role(
        self,
        id="taskRole",
        assumed_by=iam.CompositePrincipal(
            iam.ServicePrincipal(service='ecs-tasks.amazonaws.com'),
            iam.ServicePrincipal(service='ec2.amazonaws.com')),
        role_name="webmaintaskRole",
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonRDSFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSQSFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonS3FullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "CloudWatchFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonDynamoDBFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonRedshiftFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonKinesisFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonECSTaskExecutionRolePolicy"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSNSFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AWSLambdaRole"),
            iam.ManagedPolicy(
                self,
                id="ManagedPolicy",
                managed_policy_name="Grant_dev",
                statements=[
                    # BUGFIX: action was misspelled
                    # "secretemanager:GetSecreteValue"; the correct IAM
                    # action is "secretsmanager:GetSecretValue", so the
                    # previous statement granted no usable permission.
                    iam.PolicyStatement(actions=[
                        "kms:Decrypt",
                        "secretsmanager:GetSecretValue"
                    ],
                                        resources=["*"])
                ])
        ])

    # WebApp main task definition: app container + Datadog agent sidecar.
    webmain_task_definition = ecs.FargateTaskDefinition(
        self,
        "WebAppMain",
        memory_limit_mib=512,
        cpu=256,
        task_role=taskRole,
        execution_role=taskRole)

    webmain_container = webmain_task_definition.add_container(
        "webapp-mainContainer",
        image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample"),
        environment=EcsStack.commands,
        # Datadog autodiscovery labels for the fargate check.
        docker_labels={
            "com.datadoghq.ad.instances":
            "[{\"host\": \"%%host%%\", \"port\": 80}]",
            "com.datadoghq.ad.check_names": "[\"ecs_fargate\"]",
            "com.datadoghq.ad.init_configs": "[{}]"
        },
        logging=ecs.LogDriver.aws_logs(stream_prefix="awslogs"))

    # Reuse the shared commands map for the sidecar's environment:
    # clear the app variables, then load section 1 of the config.
    EcsStack.commands.clear()
    EcsStack.readConfig(1)
    webmain_datadog_container = webmain_task_definition.add_container(
        "webapp-main_datadog_Container",
        image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample"),
        environment=EcsStack.commands)

    # Port mappings: 80 for the app, 8125/8126 for Datadog (statsd/APM).
    webmain_port_mapping = ecs.PortMapping(container_port=80,
                                           host_port=80,
                                           protocol=ecs.Protocol.TCP)
    datadog_port_mapping1 = ecs.PortMapping(container_port=8126,
                                            host_port=8126,
                                            protocol=ecs.Protocol.TCP)
    datadog_port_mapping2 = ecs.PortMapping(container_port=8125,
                                            host_port=8125,
                                            protocol=ecs.Protocol.TCP)
    webmain_container.add_port_mappings(webmain_port_mapping)
    webmain_datadog_container.add_port_mappings(datadog_port_mapping1)
    webmain_datadog_container.add_port_mappings(datadog_port_mapping2)

    # Security group for the service; allows Postgres from one fixed IP.
    webmain_sg = ec.SecurityGroup(self,
                                  "webmain_sg",
                                  vpc=vpc,
                                  allow_all_outbound=True,
                                  security_group_name="WebAppMain")
    webmain_sg.add_ingress_rule(peer=Peer.ipv4("202.65.133.194/32"),
                                connection=Port.tcp(5432))

    webmain_service = ecs.FargateService(
        self,
        "webapp-main",
        cluster=cluster,
        task_definition=webmain_task_definition,
        desired_count=1,
        security_group=webmain_sg)

    # Internet-facing load balancer in the public subnets.
    webmain_lb = elbv2.ApplicationLoadBalancer(
        self,
        "LB",
        vpc=vpc,
        internet_facing=True,
        load_balancer_name="WebAppMain",
        vpc_subnets=ec.SubnetSelection(subnet_type=ec.SubnetType.PUBLIC))

    webmain_target_grp = elbv2.ApplicationTargetGroup(
        self,
        id="webapp-main-target",
        port=80,
        protocol=elbv2.ApplicationProtocol.HTTP,
        health_check=elbv2.HealthCheck(healthy_http_codes="200-399",
                                       healthy_threshold_count=2,
                                       unhealthy_threshold_count=2,
                                       port="traffic-port",
                                       protocol=elbv2.Protocol.HTTP,
                                       timeout=core.Duration.seconds(6),
                                       interval=core.Duration.seconds(10)),
        targets=[webmain_service],
        target_group_name="WebAppMain",
        target_type=elbv2.TargetType.IP,
        vpc=vpc)

    # HTTPS listener forwarding to the target group. (Return value unused;
    # the construct registers itself on the load balancer.)
    webmain_lb.add_listener(
        "webMain_Listener",
        port=443,
        open=True,
        default_target_groups=[webmain_target_grp],
        certificate_arns=[
            "arn:aws:acm:us-west-2:384853870836:certificate/182c0fdd-813f-4bd3-aee1-0b4543cfb52b"
        ])

    # Plain HTTP listener that only issues a permanent redirect to HTTPS.
    listener2 = webmain_lb.add_listener(
        "webMain_Listener2",
        port=80,
    )
    listener2.add_redirect_response(id="HttptoHttps",
                                    status_code="HTTP_301",
                                    port="443",
                                    protocol="HTTPS")
def __init__(self, scope: cdk.Construct, construct_id: str,
             gitlab: cdk.Stack, **kwargs) -> None:
    """Provision a GitLab Runner EC2 instance inside the GitLab stack's VPC.

    Creates a security group, an SSM-login IAM role, a launch template
    whose user data installs the AWS CLI and then downloads and runs the
    bundled ``runner.sh`` bootstrap script, and finally one instance from
    that template in the 'Runner' subnet group.

    :param gitlab: sibling stack exposing the VPC (``gitlab.vpc``) to
        deploy the runner into.
    """
    super().__init__(scope, construct_id, **kwargs)

    # Stack-wide tags; 'Name' is also propagated to launched instances.
    tags = cdk.Tags.of(self)
    tags.add(key='Stage', value='DevOps')
    tags.add(key='Module', value='Runner')
    tags.add(key='Owner', value='Vunk.Lai')
    tags.add(key='Name', value='GitLab/Runner',
             apply_to_launched_instances=True)

    subnets = gitlab.vpc.select_subnets(subnet_group_name='Runner').subnets

    security_group = ec2.SecurityGroup(
        self, 'sg',
        vpc=gitlab.vpc,
        security_group_name='GitLab/Runner:SecurityGroup',
        description='Default Runner Security Group',
        allow_all_outbound=True)

    # Minimal permissions for Session Manager access to the instance.
    policy = iam.ManagedPolicy(
        self, 'policy',
        # Use alphanumeric and '+=,.@-_' characters
        managed_policy_name='GitLab-Runner_Policy',
        description='SSM Login',
        statements=[
            iam.PolicyStatement(
                actions=['ssmmessages:*', 'ssm:UpdateInstanceInformation'],
                resources=['*']),
        ])

    role = iam.Role(
        self, 'role',
        # Use alphanumeric and '+=,.@-_' characters
        role_name='GitLab-Runner_Role',
        assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'),
        managed_policies=[policy])

    folder = Path(__file__).parent.parent / 'user_data'
    user_data = ec2.UserData.for_linux()
    # BUGFIX: use `apt-get install -y` — plain `apt install unzip` stops
    # at the interactive confirmation prompt, which aborts under
    # cloud-init's non-interactive shell and leaves the CLI uninstalled.
    user_data.add_commands(
        'apt-get install -y unzip',
        'curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "aws_cli_v2.zip"',
        'unzip aws_cli_v2.zip',
        'sudo ./aws/install',
        'aws --version')

    # Ship runner.sh to S3 as a CDK asset, let the instance role read it,
    # then download and execute it at first boot.
    asset = Asset(self, 'asset:userdata', path=str(folder / 'runner.sh'))
    asset.grant_read(role)
    path = user_data.add_s3_download_command(
        bucket=asset.bucket, bucket_key=asset.s3_object_key)
    user_data.add_execute_file_command(
        file_path=path, arguments='--verbose -y')

    template = ec2.LaunchTemplate(
        self, 'template',
        launch_template_name='GitLab/Runner_LaunchTemplate',
        cpu_credits=ec2.CpuCredits.STANDARD,
        instance_type=ec2.InstanceType.of(
            ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.MICRO),
        machine_image=ec2.MachineImage.lookup(
            name='ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*',
            owners=['099720109477']),
        role=role,
        security_group=security_group,
        user_data=user_data,
        block_devices=[
            ec2.BlockDevice(
                device_name='/dev/sda1',
                volume=ec2.BlockDeviceVolume.ebs(
                    volume_size=20,
                    volume_type=ec2.EbsDeviceVolumeType.GP3,
                    delete_on_termination=True,
                )),
        ]
    )

    # Launch a single runner instance from the template in the first
    # 'Runner' subnet.
    ec2.CfnInstance(
        self, 'instance',
        launch_template=ec2.CfnInstance.LaunchTemplateSpecificationProperty(
            version=template.latest_version_number,
            launch_template_id=template.launch_template_id,
        ),
        subnet_id=subnets[0].subnet_id
    )