def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) # CloudTrailの暗号化キーを作成 trail_key = kms.Key(self, 'TrailKey', alias='TrailKey', enable_key_rotation=True) # 証跡を作成 cloudtrail.Trail(self, 'trail', trail_name='trail', encryption_key=trail_key, send_to_cloud_watch_logs=True) # CloudTrailに暗号化権限を付与するポリシーを定義 key_policy = iam.PolicyStatement() key_policy.add_service_principal("cloudtrail.amazonaws.com") key_policy.add_actions("kms:GenerateDataKey*") key_policy.add_all_resources() key_policy.add_condition( 'StringLike', { 'kms:EncryptionContext:aws:cloudtrail:arn': 'arn:aws:cloudtrail:*:' + self.account + ':trail/*' }) # 暗号化キーにポリシーを付与 trail_key.add_to_resource_policy(key_policy)
def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc, **kwargs) -> None:
    """Provision a CloudTrail trail plus an ECS cluster with Cloud Map discovery."""
    super().__init__(scope, id, **kwargs)

    # Record account API activity for the workshop.
    self.trail = cloudtrail.Trail(self, 'ECSWorkshopCloudTrail')

    # Operations cluster placed in the supplied VPC, with a default
    # service-discovery namespace named "service".
    ops_cluster = ecs.Cluster(scope=self, id='OpsCluster', vpc=vpc)
    ops_cluster.add_default_cloud_map_namespace(name="service")
    self.cluster = ops_cluster
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Stack that detects CloudTrail logging-state changes and reacts via Lambda.

    Creates a user, a multi-region trail, a Lambda that can restart logging
    (and attach AWSDenyAll to the user), an SNS topic for notification, and an
    event rule wiring trail state-change events to the Lambda.
    """
    super().__init__(scope, id, **kwargs)
    # NOTE(review): AdministratorAccess on this user looks intentional for the
    # demo (the Lambda can later quarantine it with AWSDenyAll) — confirm.
    user = iam.User(self, 'myuser',
                    managed_policies=[iam.ManagedPolicy.from_aws_managed_policy_name('AdministratorAccess')])
    # Multi-region trail capturing all management events, with file validation.
    trail = cloudtrail.Trail(self, 's3-account-activity',
                             enable_file_validation=True,
                             include_global_service_events=True,
                             is_multi_region_trail=True,
                             management_events=cloudtrail.ReadWriteType.ALL)
    fn = _lambda.Function(self, 'cloudtrail_reactivator',
                          description='Reactivates stopped CloudTrail logs',
                          code=_lambda.Code.from_asset('./lambda'),
                          handler='cloudtrail_reactivator.handler',
                          runtime=_lambda.Runtime.PYTHON_3_8,
                          initial_policy=[
                              # Allow Lambda to re-activate CloudTrail logging.
                              iam.PolicyStatement(resources=[trail.trail_arn],
                                                  actions=['cloudtrail:DescribeTrails',
                                                           'cloudtrail:GetTrailStatus',
                                                           'cloudtrail:StartLogging'],
                                                  effect=iam.Effect.ALLOW),
                              # Allow Lambda to attach policies to user —
                              # restricted to the AWSDenyAll policy only.
                              iam.PolicyStatement(resources=[user.user_arn],
                                                  actions=['iam:AttachUserPolicy'],
                                                  effect=iam.Effect.ALLOW,
                                                  conditions={'ArnEquals': {"iam:PolicyARN": "arn:aws:iam::aws:policy/AWSDenyAll"}})
                          ])
    # Notify by email whenever the Lambda publishes a state transition.
    topic = sns.Topic(self, 'CloudTrailLoggingStateTransition')
    topic.add_subscription(subs.EmailSubscription('*****@*****.**'))
    topic.grant_publish(fn)
    fn.add_environment('SNS_ARN', topic.topic_arn)
    # Event Pattern that defines the CloudTrail events that should trigger
    # the Lambda.
    event_pattern = events.EventPattern(source=['aws.cloudtrail'],
                                        detail={'eventName': ['StopLogging',
                                                              'DeleteTrail',
                                                              'UpdateTrail',
                                                              'RemoveTags',
                                                              'AddTags',
                                                              'CreateTrail',
                                                              'StartLogging',
                                                              'PutEventSelectors'],
                                                'eventSource': ['cloudtrail.amazonaws.com']})
    trail.on_cloud_trail_event('CloudTrailStateChange',
                               description='Detects CloudTrail log state changes',
                               target=events_targets.LambdaFunction(fn),
                               event_pattern=event_pattern)
def __init__(self, scope: core.Construct, id: str, s3bucket, **kwargs) -> None:
    """Create a project/environment-named CloudTrail trail logging to *s3bucket*."""
    super().__init__(scope, id, **kwargs)

    # Project and environment names come from the CDK context.
    project = self.node.try_get_context("project_name")
    environment = self.node.try_get_context("env")

    trail = cloudtrail.Trail(
        self, 'cloudtrail-id',
        bucket=s3bucket,
        trail_name='-'.join((project, environment, 'trail')),
    )
def __init__(self, scope: core.App, id: str, props, **kwargs) -> None:
    """Operations trail streaming to CloudWatch Logs, plus an Amplify CI asset bucket."""
    super().__init__(scope, id, **kwargs)

    # Dedicated log group receiving the CloudTrail stream, kept two weeks.
    trail_log_group = aws_logs.LogGroup(
        self, "CloudTrailLogs",
        log_group_name="CloudTrailLogs",
        retention=aws_logs.RetentionDays.TWO_WEEKS)

    aws_cloudtrail.Trail(
        self, "OpsTrail",
        cloud_watch_log_group=trail_log_group,
        cloud_watch_logs_retention=aws_logs.RetentionDays.TWO_WEEKS,
        send_to_cloud_watch_logs=True,
        trail_name="OpsTrail")

    # Bucket holding Amplify CI assets; removed together with the stack.
    self.config_source_bucket = aws_s3.Bucket(
        self, "AmplifyConfigBucket",
        bucket_name=f"amplify-ci-assets-{self.account}",
        removal_policy=core.RemovalPolicy.DESTROY)
def event_rules(self, fn_process_transfer_task):
    """Route completed S3 batch-job events (via CloudTrail) to the transfer-task Lambda."""
    audit_trail = trail_.Trail(self, "CloudTrail", send_to_cloud_watch_logs=True)

    job_status_rule = audit_trail.on_event(
        self, "S3JobEvent",
        target=targets.LambdaFunction(handler=fn_process_transfer_task))

    # Fire only for S3 "JobStatusChanged" service events whose status is Complete.
    job_status_rule.add_event_pattern(
        source=['aws.s3'],
        detail_type=["AWS Service Event via CloudTrail"],
        detail={
            "eventSource": ["s3.amazonaws.com"],
            "eventName": ["JobStatusChanged"],
            "serviceEventDetails": {
                "status": ["Complete"]
            },
        })
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Two ECR repositories whose image pushes are surfaced through CloudTrail."""
    super().__init__(scope, id, **kwargs)

    # Trail so ECR push events are visible to CloudWatch Events rules.
    cloud_trail = aws_cloudtrail.Trail(self, "Trail")

    def _image_repo(construct_id: str, repo_name: str) -> aws_ecr.Repository:
        # Repository capped at 10 images, deleted with the stack, emitting
        # an "ImagePushed" CloudTrail event rule.
        repo = aws_ecr.Repository(
            self, construct_id,
            repository_name=repo_name,
            lifecycle_rules=[aws_ecr.LifecycleRule(max_image_count=10)],
            removal_policy=core.RemovalPolicy.DESTROY)
        repo.on_cloud_trail_image_pushed("ImagePushed")
        return repo

    self.hello_docker_repo = _image_repo("HelloDocker", "hello-docker")
    self.tweet_ingest_repo = _image_repo("TweetIngest", "tweet-ingest")
def __init__(self, scope: cdk.Construct, id: str, **kwargs) -> None:
    """HPC workshop stack: VPC, data/asset buckets, CloudTrail, and a Cloud9
    environment bootstrapped through Lambda-backed custom resources.

    Cloud9 does not support user data, so an SSH keypair, an instance profile
    swap, and a bootstrap script are all applied via custom resources whose
    Lambdas run under dedicated roles.

    (Review: removed the unused `selection` and `quickstart_bucket` locals.)
    """
    super().__init__(scope, id, **kwargs)

    ### Parameters
    bootstrap_script_args = cdk.CfnParameter(
        self, 'BootstrapScriptArgs',
        type='String',
        default='',
        description='Space separated arguments passed to the bootstrap script.'
    )

    # Network: VPC with public/private subnets across all available AZs.
    vpc = ec2.Vpc(self, 'VPC', cidr='10.0.0.0/16', max_azs=99)

    # Output created subnets
    for i, public_subnet in enumerate(vpc.public_subnets):
        cdk.CfnOutput(self, 'PublicSubnet%i' % i, value=public_subnet.subnet_id)
    for i, private_subnet in enumerate(vpc.private_subnets):
        cdk.CfnOutput(self, 'PrivateSubnet%i' % i, value=private_subnet.subnet_id)
    cdk.CfnOutput(self, 'VPCId', value=vpc.vpc_id)

    # Data bucket plus the two script assets uploaded by the CDK.
    bucket = s3.Bucket(self, "DataRepository")
    bootstrap_script = assets.Asset(self, 'BootstrapScript',
                                    path='scripts/bootstrap.sh')
    pcluster_post_install_script = assets.Asset(self, 'PclusterPostInstallScript',
                                                path='scripts/post_install_script.sh')

    # Audit API activity into the data bucket.
    cloudtrail.Trail(self, 'CloudTrail', bucket=bucket)

    # Create a Cloud9 instance. Cloud9 doesn't have the ability to provide
    # userdata, so we use SSM run command (via the custom resources below).
    cloud9_instance = cloud9.Ec2Environment(
        self, 'Cloud9Env',
        vpc=vpc,
        instance_type=ec2.InstanceType(instance_type_identifier='c5.large'))
    cdk.CfnOutput(self, 'URL', value=cloud9_instance.ide_url)

    # --- Keypair custom resource: create a keypair in Lambda and store the
    # --- private key in Secrets Manager.
    c9_createkeypair_role = iam.Role(
        self, 'Cloud9CreateKeypairRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
    c9_createkeypair_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSLambdaBasicExecutionRole'))
    c9_createkeypair_role.add_to_policy(iam.PolicyStatement(
        actions=[
            'ec2:CreateKeyPair',
            'ec2:DeleteKeyPair'
        ],
        resources=['*'],
    ))
    c9_createkeypair_lambda = _lambda.Function(
        self, 'C9CreateKeyPairLambda',
        runtime=_lambda.Runtime.PYTHON_3_6,
        handler='lambda_function.handler',
        timeout=cdk.Duration.seconds(300),
        role=c9_createkeypair_role,
        code=_lambda.Code.asset('functions/source/c9keypair'),
    )
    c9_createkeypair_provider = cr.Provider(
        self, "C9CreateKeyPairProvider",
        on_event_handler=c9_createkeypair_lambda)
    c9_createkeypair_cr = cfn.CustomResource(
        self, "C9CreateKeyPair",
        provider=c9_createkeypair_provider,
        properties={
            'ServiceToken': c9_createkeypair_lambda.function_arn
        })
    c9_ssh_private_key_secret = secretsmanager.CfnSecret(
        self, 'SshPrivateKeySecret',
        secret_string=c9_createkeypair_cr.get_att_string('PrivateKey'))

    # The iam policy has a <REGION> parameter that needs to be replaced.
    # We do it programmatically so future versions of the synth'd stack
    # template include all regions.
    with open('iam/ParallelClusterUserPolicy.json') as json_file:
        data = json.load(json_file)
    for s in data['Statement']:
        if s['Sid'] == 'S3ParallelClusterReadOnly':
            s['Resource'] = []
            for r in region_info.RegionInfo.regions:
                s['Resource'].append('arn:aws:s3:::{0}-aws-parallelcluster*'.format(r.name))
    parallelcluster_user_policy = iam.CfnManagedPolicy(
        self, 'ParallelClusterUserPolicy',
        policy_document=iam.PolicyDocument.from_json(data))

    # --- Role assumed by the Cloud9 EC2 instance itself.
    cloud9_role = iam.Role(self, 'Cloud9Role',
                           assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'))
    cloud9_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name('AmazonSSMManagedInstanceCore'))
    cloud9_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name('AWSCloud9User'))
    cloud9_role.add_managed_policy(
        iam.ManagedPolicy.from_managed_policy_arn(
            self, 'AttachParallelClusterUserPolicy', parallelcluster_user_policy.ref))
    cloud9_role.add_to_policy(iam.PolicyStatement(
        resources=['*'],
        actions=[
            'ec2:DescribeInstances',
            'ec2:DescribeVolumes',
            'ec2:ModifyVolume'
        ]
    ))
    cloud9_role.add_to_policy(iam.PolicyStatement(
        resources=[c9_ssh_private_key_secret.ref],
        actions=[
            'secretsmanager:GetSecretValue'
        ]
    ))
    bootstrap_script.grant_read(cloud9_role)
    pcluster_post_install_script.grant_read(cloud9_role)

    # --- Cloud9 Setup IAM Role: used by the setup Lambdas below.
    cloud9_setup_role = iam.Role(self, 'Cloud9SetupRole',
                                 assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
    cloud9_setup_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSLambdaBasicExecutionRole'))
    # Allow pcluster to be run in bootstrap
    cloud9_setup_role.add_managed_policy(
        iam.ManagedPolicy.from_managed_policy_arn(
            self, 'AttachParallelClusterUserPolicySetup', parallelcluster_user_policy.ref))
    # Add IAM permissions to the lambda role.
    # (BUG FIX: 'ec2:DesctibeVolumeAttribute' was misspelled, granting a
    # nonexistent action; also dropped a second statement that duplicated the
    # lambda:AddPermission/RemovePermission grants already listed here.)
    cloud9_setup_role.add_to_policy(iam.PolicyStatement(
        actions=[
            'cloudformation:DescribeStackResources',
            'ec2:AssociateIamInstanceProfile',
            'ec2:AuthorizeSecurityGroupIngress',
            'ec2:DescribeInstances',
            'ec2:DescribeInstanceStatus',
            'ec2:DescribeInstanceAttribute',
            'ec2:DescribeIamInstanceProfileAssociations',
            'ec2:DescribeVolumes',
            'ec2:DescribeVolumeAttribute',
            'ec2:DescribeVolumesModifications',
            'ec2:DescribeVolumeStatus',
            'ssm:DescribeInstanceInformation',
            'ec2:ModifyVolume',
            'ec2:ReplaceIamInstanceProfileAssociation',
            'ec2:ReportInstanceStatus',
            'ssm:SendCommand',
            'ssm:GetCommandInvocation',
            's3:GetObject',
            'lambda:AddPermission',
            'lambda:RemovePermission',
            'events:PutRule',
            'events:DeleteRule',
            'events:PutTargets',
            'events:RemoveTargets',
        ],
        resources=['*'],
    ))
    cloud9_setup_role.add_to_policy(iam.PolicyStatement(
        actions=['iam:PassRole'],
        resources=[cloud9_role.role_arn]
    ))

    # Cloud9 Instance Profile
    c9_instance_profile = iam.CfnInstanceProfile(
        self, "Cloud9InstanceProfile",
        roles=[cloud9_role.role_name])

    # Lambda to add Instance Profile to Cloud9
    c9_instance_profile_lambda = _lambda.Function(
        self, 'C9InstanceProfileLambda',
        runtime=_lambda.Runtime.PYTHON_3_6,
        handler='lambda_function.handler',
        timeout=cdk.Duration.seconds(900),
        role=cloud9_setup_role,
        code=_lambda.Code.asset('functions/source/c9InstanceProfile'),
    )
    c9_instance_profile_provider = cr.Provider(
        self, "C9InstanceProfileProvider",
        on_event_handler=c9_instance_profile_lambda,
    )
    instance_id = cfn.CustomResource(
        self, "C9InstanceProfile",
        provider=c9_instance_profile_provider,
        properties={
            'InstanceProfile': c9_instance_profile.ref,
            'Cloud9Environment': cloud9_instance.environment_id,
        })
    instance_id.node.add_dependency(cloud9_instance)

    # Lambda for Cloud9 Bootstrap
    c9_bootstrap_lambda = _lambda.Function(
        self, 'C9BootstrapLambda',
        runtime=_lambda.Runtime.PYTHON_3_6,
        handler='lambda_function.handler',
        timeout=cdk.Duration.seconds(900),
        role=cloud9_setup_role,
        code=_lambda.Code.asset('functions/source/c9bootstrap'),
    )
    c9_bootstrap_provider = cr.Provider(
        self, "C9BootstrapProvider",
        on_event_handler=c9_bootstrap_lambda)
    c9_bootstrap_cr = cfn.CustomResource(
        self, "C9Bootstrap",
        provider=c9_bootstrap_provider,
        properties={
            'Cloud9Environment': cloud9_instance.environment_id,
            'BootstrapPath': 's3://%s/%s' % (bootstrap_script.s3_bucket_name,
                                             bootstrap_script.s3_object_key),
            # BUG FIX: pass the parameter's string token, not the
            # CfnParameter construct object itself.
            'BootstrapArguments': bootstrap_script_args.value_as_string,
            'VPCID': vpc.vpc_id,
            'MasterSubnetID': vpc.public_subnets[0].subnet_id,
            'ComputeSubnetID': vpc.private_subnets[0].subnet_id,
            'PostInstallScriptS3Url': "".join(['s3://',
                                               pcluster_post_install_script.s3_bucket_name,
                                               "/",
                                               pcluster_post_install_script.s3_object_key]),
            'PostInstallScriptBucket': pcluster_post_install_script.s3_bucket_name,
            'KeyPairId': c9_createkeypair_cr.ref,
            'KeyPairSecretArn': c9_ssh_private_key_secret.ref
        })
    # Bootstrap only after the instance profile, keypair, and secret exist.
    c9_bootstrap_cr.node.add_dependency(instance_id)
    c9_bootstrap_cr.node.add_dependency(c9_createkeypair_cr)
    c9_bootstrap_cr.node.add_dependency(c9_ssh_private_key_secret)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) # The code that defines your stack goes here ########################################################################### # AWS SECRETS MANAGER - Templated secret ########################################################################### # templated_secret = aws_secretsmanager.Secret(self, "TemplatedSecret", # generate_secret_string=aws_secretsmanager.SecretStringGenerator( # secret_string_template= "{\"username\":\"cleanbox\"}", # generate_string_key="password" # ) # ) ########################################################################### # CUSTOM CLOUDFORMATION RESOURCE ########################################################################### # customlambda = aws_lambda.Function(self,'customconfig', # handler='customconfig.on_event', # runtime=aws_lambda.Runtime.PYTHON_3_7, # code=aws_lambda.Code.asset('customconfig'), # ) # customlambda_statement = aws_iam.PolicyStatement(actions=["events:PutRule"], conditions=None, effect=None, not_actions=None, not_principals=None, not_resources=None, principals=None, resources=["*"], sid=None) # customlambda.add_to_role_policy(statement=customlambda_statement) # my_provider = cr.Provider(self, "MyProvider", # on_event_handler=customlambda, # # is_complete_handler=is_complete, # optional async "waiter" # log_retention=logs.RetentionDays.SIX_MONTHS # ) # CustomResource(self, 'customconfigresource', service_token=my_provider.service_token) ########################################################################### # AWS LAMBDA FUNCTIONS ########################################################################### sqs_to_elastic_cloud = aws_lambda.Function( self, 'sqs_to_elastic_cloud', handler='sqs_to_elastic_cloud.lambda_handler', runtime=aws_lambda.Runtime.PYTHON_3_7, code=aws_lambda.Code.asset('sqs_to_elastic_cloud'), memory_size=4096, timeout=core.Duration.seconds(301), log_retention=logs.RetentionDays.ONE_DAY) 
sqs_to_elasticsearch_service = aws_lambda.Function( self, 'sqs_to_elasticsearch_service', handler='sqs_to_elasticsearch_service.lambda_handler', runtime=aws_lambda.Runtime.PYTHON_3_7, code=aws_lambda.Code.asset('sqs_to_elasticsearch_service'), memory_size=4096, timeout=core.Duration.seconds(301), log_retention=logs.RetentionDays.ONE_DAY) ########################################################################### # AWS LAMBDA FUNCTIONS ########################################################################### ########################################################################### # AMAZON S3 BUCKETS ########################################################################### cloudtrail_log_bucket = aws_s3.Bucket(self, "cloudtrail_log_bucket") ########################################################################### # LAMBDA SUPPLEMENTAL POLICIES ########################################################################### lambda_supplemental_policy_statement = aws_iam.PolicyStatement( effect=aws_iam.Effect.ALLOW, actions=["s3:Get*", "s3:Head*", "s3:List*", "firehose:*", "es:*"], resources=["*"]) sqs_to_elastic_cloud.add_to_role_policy( lambda_supplemental_policy_statement) sqs_to_elasticsearch_service.add_to_role_policy( lambda_supplemental_policy_statement) ########################################################################### # AWS SNS TOPICS ########################################################################### cloudtrail_log_topic = aws_sns.Topic(self, "cloudtrail_log_topic") ########################################################################### # ADD AMAZON S3 BUCKET NOTIFICATIONS ########################################################################### cloudtrail_log_bucket.add_event_notification( aws_s3.EventType.OBJECT_CREATED, aws_s3_notifications.SnsDestination(cloudtrail_log_topic)) ########################################################################### # AWS SQS QUEUES 
########################################################################### sqs_to_elasticsearch_service_queue_iqueue = aws_sqs.Queue( self, "sqs_to_elasticsearch_service_queue_dlq") sqs_to_elasticsearch_service_queue_dlq = aws_sqs.DeadLetterQueue( max_receive_count=10, queue=sqs_to_elasticsearch_service_queue_iqueue) sqs_to_elasticsearch_service_queue = aws_sqs.Queue( self, "sqs_to_elasticsearch_service_queue", visibility_timeout=core.Duration.seconds(300), dead_letter_queue=sqs_to_elasticsearch_service_queue_dlq) sqs_to_elastic_cloud_queue_iqueue = aws_sqs.Queue( self, "sqs_to_elastic_cloud_queue_dlq") sqs_to_elastic_cloud_queue_dlq = aws_sqs.DeadLetterQueue( max_receive_count=10, queue=sqs_to_elastic_cloud_queue_iqueue) sqs_to_elastic_cloud_queue = aws_sqs.Queue( self, "sqs_to_elastic_cloud_queue", visibility_timeout=core.Duration.seconds(300), dead_letter_queue=sqs_to_elastic_cloud_queue_dlq) ########################################################################### # AWS SNS TOPIC SUBSCRIPTIONS ########################################################################### cloudtrail_log_topic.add_subscription( aws_sns_subscriptions.SqsSubscription(sqs_to_elastic_cloud_queue)) cloudtrail_log_topic.add_subscription( aws_sns_subscriptions.SqsSubscription( sqs_to_elasticsearch_service_queue)) ########################################################################### # AWS LAMBDA SQS EVENT SOURCE ########################################################################### sqs_to_elastic_cloud.add_event_source( SqsEventSource(sqs_to_elastic_cloud_queue, batch_size=10)) sqs_to_elasticsearch_service.add_event_source( SqsEventSource(sqs_to_elasticsearch_service_queue, batch_size=10)) ########################################################################### # AWS ELASTICSEARCH DOMAIN ########################################################################### ########################################################################### # AWS ELASTICSEARCH DOMAIN ACCESS 
POLICY ########################################################################### this_aws_account = aws_iam.AccountPrincipal(account_id="012345678912") s3_to_elasticsearch_cloudtrail_logs_domain = aws_elasticsearch.Domain( self, "s3-to-elasticsearch-cloudtrail-logs-domain", version=aws_elasticsearch.ElasticsearchVersion.V7_1, capacity={ "master_nodes": 3, "data_nodes": 4 }, ebs={"volume_size": 100}, zone_awareness={"availability_zone_count": 2}, logging={ "slow_search_log_enabled": True, "app_log_enabled": True, "slow_index_log_enabled": True }) ########################################################################### # AMAZON COGNITO USER POOL ########################################################################### s3_to_elasticsearch_user_pool = aws_cognito.UserPool( self, "s3-to-elasticsearch-cloudtrial-logs-pool", account_recovery=None, auto_verify=None, custom_attributes=None, email_settings=None, enable_sms_role=None, lambda_triggers=None, mfa=None, mfa_second_factor=None, password_policy=None, self_sign_up_enabled=None, sign_in_aliases=aws_cognito.SignInAliases(email=True, phone=None, preferred_username=None, username=True), sign_in_case_sensitive=None, sms_role=None, sms_role_external_id=None, standard_attributes=None, user_invitation=None, user_pool_name=None, user_verification=None) sqs_to_elasticsearch_service.add_environment( "ELASTICSEARCH_HOST", s3_to_elasticsearch_cloudtrail_logs_domain.domain_endpoint) sqs_to_elasticsearch_service.add_environment( "QUEUEURL", sqs_to_elasticsearch_service_queue.queue_url) sqs_to_elasticsearch_service.add_environment("DEBUG", "False") sqs_to_elastic_cloud.add_environment("ELASTICCLOUD_SECRET_NAME", "-") sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_ID", "-") sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_PASSWORD", "-") sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_USERNAME", "-") sqs_to_elastic_cloud.add_environment( "QUEUEURL", sqs_to_elastic_cloud_queue.queue_url) 
sqs_to_elastic_cloud.add_environment("DEBUG", "False") ########################################################################### # AWS COGNITO USER POOL ########################################################################### allevents_trail = aws_cloudtrail.Trail( self, "allevents_trail", bucket=cloudtrail_log_bucket, cloud_watch_log_group=None, cloud_watch_logs_retention=None, enable_file_validation=None, encryption_key=None, include_global_service_events=None, is_multi_region_trail=True, kms_key=None, management_events=aws_cloudtrail.ReadWriteType("ALL"), s3_key_prefix=None, send_to_cloud_watch_logs=False, sns_topic=None, trail_name=None)
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Monitored S3 bucket with a size alarm (SNS email), CloudTrail data
    events into a second bucket, and a CloudWatch dashboard."""
    super().__init__(scope, construct_id, **kwargs)

    monitored_bucket = s3.Bucket(
        self, 'bucket-monitored',
        bucket_name='devassoc-monitored',
        removal_policy=core.RemovalPolicy.DESTROY,
        auto_delete_objects=True)
    core.CfnOutput(self, 'monitored-bucket', value=monitored_bucket.bucket_name)

    # Daily bucket-size metric published by S3 for the standard storage class.
    storage_metric = cw.Metric(
        namespace='AWS/S3',
        metric_name='BucketSizeBytes',
        dimensions={
            'BucketName': monitored_bucket.bucket_name,
            'StorageType': 'StandardStorage'
        },
        period=core.Duration.days(1))

    storage_alarm = storage_metric.create_alarm(
        self, 'bucket-alarm',
        alarm_name='S3 Storage Alarm',
        comparison_operator=cw.ComparisonOperator.GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
        evaluation_periods=1,
        period=core.Duration.days(1),
        threshold=1000,
        actions_enabled=True)

    # Alarm notifications go out by email; the address lives in SSM.
    alarm_topic = sns.Topic(self, 'size-topic', display_name='My S3 Alarm List')
    email_param = ssm.StringParameter.from_string_parameter_name(
        self, 'email-param', 'notification-email')
    sns.Subscription(
        self, 'size-topic-sub',
        topic=alarm_topic,
        protocol=sns.SubscriptionProtocol.EMAIL,
        endpoint=email_param.string_value)
    storage_alarm.add_alarm_action(cwa.SnsAction(alarm_topic))

    # Second bucket receives CloudTrail logs of the monitored bucket's
    # S3 data events.
    trail_bucket = s3.Bucket(
        self, 'bucket-s3-logs',
        bucket_name='devassoc-s3-logs',
        removal_policy=core.RemovalPolicy.DESTROY,
        auto_delete_objects=True)
    data_trail = ct.Trail(
        self, 'bucket-trail',
        bucket=trail_bucket,
        trail_name='s3_logs')
    data_trail.add_s3_event_selector([ct.S3EventSelector(bucket=monitored_bucket)])
    data_trail.log_all_s3_data_events()

    # Dashboard showing the current value and the history of the size metric.
    cw.Dashboard(
        self, 'cloudwatch-dashboard',
        dashboard_name='S3Dashboard',
        widgets=[[cw.SingleValueWidget(metrics=[storage_metric]),
                  cw.GraphWidget(left=[storage_metric])]])
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Image-publishing pipeline: uploads to an S3 bucket (surfaced via
    CloudTrail) trigger a Step Functions state machine that checks the file
    extension, then copies, thumbnails, and records each PNG image.
    """
    super().__init__(scope, id, **kwargs)

    # The start of the image pipeline
    imageBucket = aws_s3.Bucket(self, "imageBucket")

    # Capture API activity with a trail
    imageBucketTrail = aws_cloudtrail.Trail(self, "imageBucketTrail",
                                            is_multi_region_trail=False)

    # Restrict to S3 data-plane events
    imageBucketTrail.add_s3_event_selector(
        include_management_events=False,
        prefixes=[f"{imageBucket.bucket_arn}/"],
        read_write_type=aws_cloudtrail.ReadWriteType.WRITE_ONLY)

    # Filter to just PutObject and CopyObject events
    imageBucketRule = aws_events.Rule(
        self, "imageBucketRule",
        event_pattern={
            "source": ["aws.s3"],
            "detail": {
                "eventSource": ["s3.amazonaws.com"],
                "eventName": ["PutObject", "CopyObject"],
                "requestParameters": {
                    "bucketName": [imageBucket.bucket_name]
                }
            }
        })

    #--------------------#
    # Lambda Layers
    #--------------------#
    opencvLayer = aws_lambda.LayerVersion(
        self, 'opencvLayer',
        code=aws_lambda.AssetCode('layers/opencvLayer'),
        compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_6])
    boto3Layer = aws_lambda.LayerVersion(
        self, 'boto3Layer',
        code=aws_lambda.AssetCode('layers/boto3Layer'),
        compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_6])

    #--------------------#
    # Lambda Functions
    #--------------------#
    # Gather info about an image: name, extension, etc.
    getImageInfoFunc = aws_lambda.Function(
        self, "getImageInfoFunc",
        code=aws_lambda.AssetCode('functions/getImageInfoFunc'),
        handler="lambda.handler",
        runtime=aws_lambda.Runtime.PYTHON_3_6)

    # The home for the website
    webBucket = aws_s3.Bucket(self, "webBucket",
                              website_index_document='index.html')

    # Copy the image to the web bucket
    copyImageFunc = aws_lambda.Function(
        self, "copyImageFunc",
        code=aws_lambda.AssetCode('functions/copyImageFunc'),
        handler="lambda.handler",
        runtime=aws_lambda.Runtime.PYTHON_3_6,
        layers=[boto3Layer],
        environment={
            'OUTPUTBUCKET': webBucket.bucket_name,
            'OUTPUTPREFIX': 'images/'
        })

    # Grant permissions to read from the source and write to the destination
    imageBucket.grant_read(copyImageFunc)
    webBucket.grant_write(copyImageFunc)

    # Create a thumbnail of the image and place in the web bucket
    createThumbnailFunc = aws_lambda.Function(
        self, "createThumbnailFunc",
        code=aws_lambda.AssetCode('functions/createThumbnailFunc'),
        handler="lambda.handler",
        runtime=aws_lambda.Runtime.PYTHON_3_6,
        layers=[boto3Layer, opencvLayer],
        timeout=core.Duration.seconds(10),
        memory_size=256,
        environment={
            'OUTPUTBUCKET': webBucket.bucket_name,
            'OUTPUTPREFIX': 'images/'
        })

    # Grant permissions to read from the source and write to the destination
    imageBucket.grant_read(createThumbnailFunc)
    webBucket.grant_write(createThumbnailFunc)

    # Store page information
    pageTable = aws_dynamodb.Table(
        self, 'pageTable',
        partition_key={
            'name': 'pageName',
            'type': aws_dynamodb.AttributeType.STRING
        },
        billing_mode=aws_dynamodb.BillingMode.PAY_PER_REQUEST,
        stream=aws_dynamodb.StreamViewType.NEW_IMAGE)

    # Save page and image information
    updatePageInfoFunc = aws_lambda.Function(
        self, "updatePageInfoFunc",
        code=aws_lambda.AssetCode('functions/updatePageInfoFunc'),
        handler="lambda.handler",
        runtime=aws_lambda.Runtime.PYTHON_3_6,
        layers=[boto3Layer],
        environment={
            'PAGETABLE': pageTable.table_name,
            'PAGEPREFIX': 'posts/'
        })

    # Grant permissions to write to the page table
    pageTable.grant_write_data(updatePageInfoFunc)

    # Terminal success state of the pipeline.
    imagePipelineDone = aws_stepfunctions.Succeed(self, "Done processing image")

    updatePageInfoJob = aws_stepfunctions.Task(
        self, 'Update page info',
        task=aws_stepfunctions_tasks.InvokeFunction(updatePageInfoFunc))
    updatePageInfoJob.next(imagePipelineDone)

    copyImageJob = aws_stepfunctions.Task(
        self, 'Copy image',
        task=aws_stepfunctions_tasks.InvokeFunction(copyImageFunc))
    createThumbnailJob = aws_stepfunctions.Task(
        self, 'Create thumbnail',
        task=aws_stepfunctions_tasks.InvokeFunction(createThumbnailFunc))

    # These tasks can be done in parallel
    processImage = aws_stepfunctions.Parallel(self, 'Process image',
                                              result_path="$.images")
    processImage.branch(copyImageJob)
    processImage.branch(createThumbnailJob)
    processImage.next(updatePageInfoJob)

    # Results of file extension check
    notPng = aws_stepfunctions.Succeed(self, "Not a PNG")

    # Verify the file extension
    checkForPng = aws_stepfunctions.Choice(self, 'Is a PNG?')
    checkForPng.when(
        aws_stepfunctions.Condition.string_equals('$.extension', 'png'),
        processImage)
    checkForPng.otherwise(notPng)

    # A single image pipeline job for testing
    getImageInfoJob = aws_stepfunctions.Task(
        self, 'Get image info',
        task=aws_stepfunctions_tasks.InvokeFunction(getImageInfoFunc))
    getImageInfoJob.next(checkForPng)

    # Configure the image pipeline and starting state
    imagePipeline = aws_stepfunctions.StateMachine(
        self, "imagePipeline",
        definition=getImageInfoJob)

    # Matching events start the image pipeline with only the request
    # parameters (bucket/key) as input.
    imageBucketRule.add_target(
        aws_events_targets.SfnStateMachine(
            imagePipeline,
            input=aws_events.RuleTargetInput.from_event_path(
                "$.detail.requestParameters")))
def __init__(
    self,
    scope: core.Construct,
    id: str,
    **kwargs,
) -> None:
    """Lake Formation bootstrap: CloudTrail log bucket, a Glue workflow role
    scoped for Lake Formation ingestion, and a data-lake administrator user.
    """
    super().__init__(scope, id, **kwargs)

    # Bucket receiving CloudTrail logs; the Glue workflow reads these as its
    # data source.
    bucket = _s3.Bucket(self, "cloud-trail-logs")
    cloudtrail.Trail(self, "CloudTrail", bucket=bucket)

    # Create an IAM Role for Workflows
    lakeformation_workflow_role_id = "LakeFormationWorkflowRole"
    lakeFormationWorkflow = iam.PolicyDocument(statements=[
        iam.PolicyStatement(
            actions=[
                "lakeformation:GetDataAccess",
                "lakeformation:GrantPermissions",
            ],
            effect=iam.Effect.ALLOW,
            resources=["*"],
        ),
        # The workflow role must be able to pass itself to the Glue jobs
        # it launches.
        iam.PolicyStatement(
            actions=["iam:PassRole"],
            effect=iam.Effect.ALLOW,
            resources=[
                f"arn:aws:iam::{self.account}:role/{lakeformation_workflow_role_id}"
            ],
        ),
        # enable to read CloudTrail logs
        iam.PolicyStatement(
            actions=["s3:GetObject"],
            effect=iam.Effect.ALLOW,
            resources=[f"arn:aws:s3:::{bucket.bucket_name}/*"],
        ),
    ])
    iam.Role(
        self,
        id=lakeformation_workflow_role_id,
        assumed_by=iam.ServicePrincipal("glue.amazonaws.com"),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AWSGlueServiceRole")
        ],
        inline_policies={"LakeFormationWorkflow": lakeFormationWorkflow},
    )

    # Create a Data Lake Administrator
    iam.User(
        self,
        id="DataLakeAdministrator",
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AWSLakeFormationDataAdmin"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AWSGlueConsoleFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "CloudWatchLogsReadOnlyAccess"),
        ],
    ).attach_inline_policy(
        iam.Policy(
            self,
            id="LakeFormationSLR",
            statements=[
                # Allow creating the service-linked role, but only for the
                # Lake Formation service.
                iam.PolicyStatement(
                    actions=[
                        "iam:CreateServiceLinkedRole",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                    conditions={
                        "StringEquals": {
                            "iam:AWSServiceName": "lakeformation.amazonaws.com"
                        }
                    },
                ),
                iam.PolicyStatement(
                    actions=["iam:PutRolePolicy"],
                    effect=iam.Effect.ALLOW,
                    resources=[
                        f"arn:aws:iam::{self.account}:role/aws-service-role/lakeformation.amazonaws.com/AWSServiceRoleForLakeFormationDataAccess"
                    ],
                ),
            ],
        ))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Quarantine-rogue-IAM-user stack.

    Creates a managed "deny all IAM actions" policy, a Lambda that attaches
    it to offending users, a CloudWatch Events rule that fires the Lambda
    on IAM API calls made by IAM users, and a CloudTrail trail so those
    API calls are recorded.
    """
    super().__init__(scope, id, **kwargs)

    # Let us create the DENY ALL policy
    deny_iam_policy_statement = _iam.PolicyStatement(
        resources=['*'],
        actions=['iam:*'],
        effect=_iam.Effect.DENY,
        # sid='DenyIAMPermissions'
    )
    # Sid is assigned after construction (the constructor call above keeps
    # it commented out).
    deny_iam_policy_statement.sid = "DenyIAMPermissions"

    deny_iam_policy = _iam.ManagedPolicy(
        self,
        "deny_iam_policy",
        description="A policy to deny IAM permissions",
        managed_policy_name="deny_iam_privileges",
        statements=[deny_iam_policy_statement])

    # Lambda Function that will quarantine the user
    # NOTE(review): if this open() fails, the handler code variable is
    # never bound and the _lambda.Function() call below raises NameError;
    # the except branch only prints. Consider re-raising instead.
    try:
        with open("./lambda_src/revoke_iam_privileges.py",
                  mode='r') as file:
            revoke_iam_privileges_fn_handler_code = file.read()
    except OSError as e:
        print(f'Unable to read file. ERROR:{str(e)}')

    revoke_iam_privileges_fn = _lambda.Function(
        self,
        id='revokeIamPrivilegesFnId',
        function_name="revoke_iam_privileges_fn",
        runtime=_lambda.Runtime.PYTHON_3_7,
        code=_lambda.InlineCode(revoke_iam_privileges_fn_handler_code),
        handler='index.lambda_handler',
        timeout=core.Duration.seconds(5),
        environment={
            "ADMIN_GROUP_NAME": global_args.ADMIN_GROUP_NAME,
            "DENY_IAM_POLICY_ARN": f"{deny_iam_policy.managed_policy_arn}"
        })

    # Permissions the Lambda needs to inspect a user's group/policy state
    # and attach the quarantine policy.
    revoke_iam_privileges_fn_perms = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        resources=[
            "*",
        ],
        actions=[
            "iam:AttachRolePolicy",
            "iam:AttachUserPolicy",
            "iam:ListAttachedUserPolicies",
            "iam:ListGroupsForUser",
            "iam:PutUserPolicy",
        ])
    revoke_iam_privileges_fn_perms.sid = "AllowLambdaToQuarantineUser"
    revoke_iam_privileges_fn.add_to_role_policy(
        revoke_iam_privileges_fn_perms)

    # Cloudwatch IAM Events Rule
    iam_evant_validator_targets = []
    iam_evant_validator_targets.append(
        _targets.LambdaFunction(handler=revoke_iam_privileges_fn))

    # Match IAM API calls (delivered via CloudTrail) performed by IAM
    # users; calls by roles/root are not matched.
    iam_event_pattern = _events.EventPattern(
        source=["aws.iam"],
        detail_type=["AWS API Call via CloudTrail"],
        detail={
            "eventSource": ["iam.amazonaws.com"],
            "userIdentity": {
                "type": ["IAMUser"]
            }
        })
    """
    # Dedicted Event Bus for Security
    sec_event_bus = _events.EventBus(
        self,
        "securityEventBusId",
        event_bus_name=f"{global_args.OWNER}_security_event_bus"
    )
    """
    # Event Rule to trigger Lambda
    iam_event_rule = _events.Rule(
        self,
        "iamEventRuleId",
        event_pattern=iam_event_pattern,
        rule_name=f"iam_event_pattern_{global_args.OWNER}",
        # event_bus=sec_event_bus,
        enabled=True,
        description="Trigger an event for IAM Events",
        targets=iam_evant_validator_targets)

    # Lets create a cloudtrail to track API events
    _event_trail = _cloudtrail.Trail(
        self,
        "cloudEventTrailId",
        is_multi_region_trail=False,
        include_global_service_events=True,
        enable_file_validation=False,
        send_to_cloud_watch_logs=False)

    ###########################################
    ################# OUTPUTS #################
    ###########################################
    output0 = core.CfnOutput(
        self,
        "SecuirtyAutomationBy",
        value=f"{global_args.SOURCE_INFO}",
        description=
        "To know more about this automation stack, check out our github page."
    )

    # Console deep-link to the quarantine Lambda.
    output1 = core.CfnOutput(
        self,
        "LambdaFunction",
        value=(f"https://console.aws.amazon.com/lambda/home?region="
               f"{core.Aws.REGION}"
               f"#functions/"
               f"{revoke_iam_privileges_fn.function_name}"),
        description="The Quarantine Lambda Function")

    # CLI helpers for exercising the automation.
    output2 = core.CfnOutput(
        self,
        "CreateUser",
        value=(f"aws iam create-user --user-name Mystique$RANDOM"),
        description="command to create users")

    output3 = core.CfnOutput(
        self,
        "DeleteUser",
        value=(f"aws iam delete-user --user-name Mystique*"),
        description="command to delete users")
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """S3 object-ACL auto-remediation stack.

    Creates a monitored bucket, a CloudTrail data-event trail scoped to
    that bucket, two Lambdas (check whether an object ACL is private /
    reset it to private), an SNS topic for InfoSecOps, a Step Functions
    state machine chaining check -> remediate -> notify-on-failure, and a
    CloudWatch Events rule that starts the state machine on
    PutObject/PutObjectAcl API calls against the bucket.
    """
    super().__init__(scope, id, **kwargs)

    # Bucket under surveillance; tagged so it can be identified as such.
    pvt_bkt = _s3.Bucket(self, "s3bucket")
    core.Tag.add(pvt_bkt, key="isMonitoredBucket", value="True")

    # Lets create a cloudtrail to track s3 data events
    s3_data_event_trail = _cloudtrail.Trail(
        self,
        "s3DataEventTrailId",
        is_multi_region_trail=False,
        include_global_service_events=False,
        enable_file_validation=True)

    # Lets capture S3 Data Events only for our bucket- TO REDUCE COST
    s3_data_event_trail.add_s3_event_selector(
        prefixes=[f"{pvt_bkt.bucket_arn}/"],
        include_management_events=True,
        read_write_type=_cloudtrail.ReadWriteType.ALL)

    # Lambda that reports whether a given object's ACL is private.
    with open("lambda_src/is_object_private.py", encoding="utf8") as fp:
        is_object_private_fn_handler_code = fp.read()

    is_object_private_fn = _lambda.Function(
        self,
        id='isObjPrivateFn',
        function_name="is_object_private_fn",
        runtime=_lambda.Runtime.PYTHON_3_7,
        code=_lambda.InlineCode(is_object_private_fn_handler_code),
        handler='index.lambda_handler',
        timeout=core.Duration.seconds(3))

    # Lets add the necessary permission for the lambda function
    is_object_private_fn_perms = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        resources=[
            "arn:aws:s3:::*",
        ],
        actions=["s3:GetObjectAcl"])
    # FIX: the sid was previously assigned to the Lambda construct
    # (is_object_private_fn.sid = ...), a no-op for the policy. It belongs
    # on the PolicyStatement, mirroring remediate_object_acl_fn_perms below.
    is_object_private_fn_perms.sid = "CheckObjectAcl"
    is_object_private_fn.add_to_role_policy(is_object_private_fn_perms)

    # Lambda that resets a non-compliant object's ACL to private.
    with open("lambda_src/make_object_private.py", encoding="utf8") as fp:
        make_object_private_fn_handler_code = fp.read()

    remediate_object_acl_fn = _lambda.Function(
        self,
        id='remediateObjAclFn',
        function_name="remediate_object_acl_fn",
        runtime=_lambda.Runtime.PYTHON_3_7,
        code=_lambda.InlineCode(make_object_private_fn_handler_code),
        handler='index.lambda_handler',
        timeout=core.Duration.seconds(10))

    # Lets add the necessary permission for the lambda function
    remediate_object_acl_fn_perms = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        resources=[
            "arn:aws:s3:::*",
        ],
        actions=["s3:PutObjectAcl"])
    remediate_object_acl_fn_perms.sid = "PutObjectAcl"
    remediate_object_acl_fn.add_to_role_policy(
        remediate_object_acl_fn_perms)

    # Notification channel for remediation failures.
    info_sec_ops_topic = _sns.Topic(self,
                                    "infoSecOpsTopicId",
                                    display_name="InfoSecTopic",
                                    topic_name="InfoSecOpsTopic")

    # Subscribe InfoSecOps Email to topic
    info_sec_ops_topic.add_subscription(
        _subs.EmailSubscription(global_args.INFO_SEC_OPS_EMAIL))

    # State Machine for notifying failed ACLs
    # Ref: https://docs.aws.amazon.com/cdk/api/latest/docs/aws-stepfunctions-readme.html
    ###############################################################################
    ################# STEP FUNCTIONS EXPERIMENTAL CODE - UNSTABLE #################
    ###############################################################################
    is_object_private_task = _sfn.Task(
        self,
        "isObjectPrivate?",
        task=_tasks.InvokeFunction(is_object_private_fn),
        result_path="$",
        output_path="$")

    remediate_object_acl_task = _sfn.Task(
        self,
        "RemediateObjectAcl",
        task=_tasks.InvokeFunction(remediate_object_acl_fn),
        result_path="$",
        output_path="$")

    notify_secops_task = _sfn.Task(
        self,
        "Notify InfoSecOps",
        task=_tasks.PublishToTopic(
            info_sec_ops_topic,
            integration_pattern=_sfn.ServiceIntegrationPattern.
            FIRE_AND_FORGET,
            message=_sfn.TaskInput.from_data_at("$.sns_message"),
            subject="Object Acl Remediation"))

    acl_remediation_failed_task = _sfn.Fail(self,
                                            "Acl Remediation Failed",
                                            cause="Acl Remediation Failed",
                                            error="Check Logs")

    acl_compliant_task = _sfn.Succeed(self,
                                      "Object Acl Compliant",
                                      comment="Object Acl is Compliant")

    # check -> (already private? succeed) | (remediate -> verify -> notify
    # SecOps and fail if remediation did not succeed).
    remediate_object_acl_sfn_definition = is_object_private_task\
        .next(_sfn.Choice(self, "Is Object Private?")\
            .when(_sfn.Condition.boolean_equals("$.is_private", True), acl_compliant_task)\
            .when(_sfn.Condition.boolean_equals("$.is_private", False), remediate_object_acl_task\
                .next(_sfn.Choice(self, "Object Remediation Complete?")\
                    .when(_sfn.Condition.boolean_equals("$.status", True), acl_compliant_task)\
                    .when(_sfn.Condition.boolean_equals("$.status", False), notify_secops_task.next(acl_remediation_failed_task))\
                    .otherwise(acl_remediation_failed_task)\
                    )
                )
            .otherwise(acl_remediation_failed_task)
        )

    remediate_object_acl_statemachine = _sfn.StateMachine(
        self,
        "stateMachineId",
        definition=remediate_object_acl_sfn_definition,
        timeout=core.Duration.minutes(3))

    # Cloudwatch Event Triggers
    put_object_acl_event_targets = []
    put_object_acl_event_targets.append(
        _targets.SfnStateMachine(
            machine=remediate_object_acl_statemachine))

    # CloudTrail-delivered S3 API calls touching our bucket's object ACLs.
    put_object_acl_event_pattern = _events.EventPattern(
        source=["aws.s3"],
        detail_type=["AWS API Call via CloudTrail"],
        detail={
            "eventSource": ["s3.amazonaws.com"],
            "eventName": ["PutObjectAcl", "PutObject"],
            "requestParameters": {
                "bucketName": [f"{pvt_bkt.bucket_name}"]
            }
        })

    put_object_acl_event_pattern_rule = _events.Rule(
        self,
        "putObjectAclEventId",
        event_pattern=put_object_acl_event_pattern,
        rule_name=f"put_s3_policy_event_{global_args.OWNER}",
        enabled=True,
        description="Trigger an event for S3 PutObjectAcl or PutObject",
        targets=put_object_acl_event_targets)

    ###########################################
    ################# OUTPUTS #################
    ###########################################
    output0 = core.CfnOutput(
        self,
        "SecuirtyAutomationFrom",
        value=f"{global_args.SOURCE_INFO}",
        description=
        "To know more about this automation stack, check out our github page."
    )

    output1 = core.CfnOutput(
        self,
        "MonitoredS3Bucket",
        value=(f"https://console.aws.amazon.com/s3/buckets/"
               f"{pvt_bkt.bucket_name}"),
        description=f"S3 Bucket for testing purposes")

    output2 = core.CfnOutput(
        self,
        "Helpercommands",
        # FIX: dropped the stray "$" that previously prefixed the
        # interpolated bucket name in the emitted helper command.
        value=(
            f"aws s3api get-object-acl --bucket {pvt_bkt.bucket_name} --key OBJECT-KEY-NAME"
        ),
        description=
        f"Commands to set object to public, Update OBJECT-KEY-NAME to your needs"
    )
def __init__(self, scope: core.Construct, id: str, region_name: str,
             db_name: str, **kwargs) -> None:
    """IAM-role "learner" stack.

    Wires CloudTrail -> S3 -> Glue/Athena for querying API activity, a
    CodeBuild project that analyses the data, and an API Gateway with
    three Lambdas (frontend, role switcher, learner that kicks off the
    CodeBuild run).
    """
    super().__init__(scope, id, **kwargs)

    # CloudTrail
    bucket = s3.Bucket(self, 'TrailBucket', versioned=True)
    # NOTE(review): variable name looks like a typo for `trail`; the
    # handle is never used afterwards.
    tail = cloudtrail.Trail(self, 'CloudTrail', bucket=bucket)
    db = glue.Database(self, 'cloudtrail', database_name=db_name)
    # Athena WorkGroup has no L2 construct here, so raw CfnResource is used;
    # query results land under athena_output/ in the trail bucket.
    awg = core.CfnResource(
        self,
        'AthenaWorkGroup',
        type="AWS::Athena::WorkGroup",
        properties={
            "Name": f"{db_name}",
            "State": "ENABLED",
            "WorkGroupConfiguration": {
                "ResultConfiguration": {
                    "OutputLocation":
                    f"s3://{bucket.bucket_name}/athena_output/"
                }
            }
        })

    # Pipeline for Working on Data
    # NOTE(review): the 'arn' environment variable still carries the
    # placeholder '-- Pur ARN Here --' and must be set before use.
    project = codebuild.Project(
        self,
        'learner_build',
        build_spec=codebuild.BuildSpec.from_source_filename(
            'buildspec.yml'),
        environment_variables={
            'arn': {
                'value': '-- Pur ARN Here --'
            },
            'athena_database': {
                'value': db_name
            },
            'region_name': {
                'value': region_name
            },
            'bucket': {
                'value': bucket.bucket_name
            }
        },
        source=codebuild.Source.s3(bucket=bucket,
                                   path='pipeline/learner.zip'))
    # Broad wildcard grants for the build role (athena/iam/glue/s3).
    project.add_to_role_policy(
        iam.PolicyStatement(actions=['athena:*'], resources=['*']))
    project.add_to_role_policy(
        iam.PolicyStatement(actions=['iam:*'], resources=['*']))
    project.add_to_role_policy(
        iam.PolicyStatement(actions=['glue:*'], resources=['*']))
    project.add_to_role_policy(
        iam.PolicyStatement(actions=['s3:*'], resources=['*']))

    # Lambdas and Api GW
    api = agw.RestApi(self, "learner-api",
                      rest_api_name="Learner Service",
                      description="System to learn roles")

    # Switches/updates IAM roles, hence the iam:* grant below.
    switcher = lambda_.Function(
        self,
        "Switcher",
        runtime=lambda_.Runtime.PYTHON_3_8,
        code=lambda_.Code.from_asset("lambdas/switcher"),
        handler="main.handler",
    )
    switcher.add_to_role_policy(
        iam.PolicyStatement(actions=['iam:*'], resources=['*']))

    frontend = lambda_.Function(
        self,
        "Frontend",
        runtime=lambda_.Runtime.PYTHON_3_8,
        code=lambda_.Code.from_asset("lambdas/frontend"),
        handler="main.handler",
    )

    # Starts the CodeBuild analysis run (project name passed via env).
    learner = lambda_.Function(
        self,
        "Learner",
        runtime=lambda_.Runtime.PYTHON_3_8,
        code=lambda_.Code.from_asset("lambdas/learner"),
        handler="main.handler",
        environment={
            'codebuild': project.project_name,
            'region_name': region_name
        })
    learner.add_to_role_policy(
        iam.PolicyStatement(actions=['codebuild:StartBuild'],
                            resources=[project.project_arn]))

    get_switcher_integration = agw.LambdaIntegration(
        switcher,
        request_templates={"application/json": '{ "statusCode": "200" }'})
    get_frontend_integration = agw.LambdaIntegration(
        frontend,
        request_templates={"application/json": '{ "statusCode": "200" }'})
    get_learner_integration = agw.LambdaIntegration(
        learner,
        request_templates={"application/json": '{ "statusCode": "200" }'})

    # GET /        -> frontend
    # GET /switch  -> switcher
    # GET /learn   -> learner
    api.root.add_method("GET", get_frontend_integration)
    switch = api.root.add_resource('switch')
    switch.add_method("GET", get_switcher_integration)
    learn = api.root.add_resource('learn')
    learn.add_method("GET", get_learner_integration)

    # Outputs
    core.CfnOutput(self, 'BucketName', value=bucket.bucket_name)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """S3 write-activity incident-response stack (IN-S3-01).

    Builds allow/deny managed policies for write access to the
    socialistir-prod bucket, developer groups, a CloudTrail trail for
    write data events on that bucket, an SNS topic, and a CloudWatch
    Events rule that routes matching S3 API calls to the S3WriteIR
    responder Lambda.
    """
    super().__init__(scope, id, **kwargs)

    # Managed policy granting the full set of write-type S3 actions on the
    # monitored bucket.
    custom_allow_policy = iam.ManagedPolicy(
        self,
        "socialistir-custom-shub-write",
        managed_policy_name="socialistir-custom-shub-write",
        statements=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "s3:PutAnalyticsConfiguration",
                    "s3:PutAccelerateConfiguration",
                    "s3:DeleteObjectVersion", "s3:RestoreObject",
                    "s3:CreateBucket", "s3:ReplicateObject",
                    "s3:PutEncryptionConfiguration",
                    "s3:DeleteBucketWebsite", "s3:AbortMultipartUpload",
                    "s3:PutLifecycleConfiguration", "s3:DeleteObject",
                    "s3:DeleteBucket", "s3:PutBucketVersioning",
                    "s3:PutMetricsConfiguration",
                    "s3:PutReplicationConfiguration",
                    "s3:PutObjectLegalHold", "s3:PutBucketCORS",
                    "s3:PutInventoryConfiguration", "s3:PutObject",
                    "s3:PutBucketNotification", "s3:PutBucketWebsite",
                    "s3:PutBucketRequestPayment", "s3:PutObjectRetention",
                    "s3:PutBucketLogging",
                    "s3:PutBucketObjectLockConfiguration",
                    "s3:ReplicateDelete"
                ],
                resources=[
                    "arn:aws:s3:::socialistir-prod",
                    "arn:aws:s3:::socialistir-prod/*"
                ])
        ])

    # Mirror-image DENY policy used to lock out offending principals.
    custom_deny_policy = iam.ManagedPolicy(
        self,
        "S3-Custom-Shub-Deny_Write",
        managed_policy_name="S3-Custom-Shub-Deny_Write",
        statements=[
            iam.PolicyStatement(
                effect=iam.Effect.DENY,
                actions=[
                    "s3:PutAnalyticsConfiguration",
                    "s3:PutAccelerateConfiguration",
                    "s3:PutMetricsConfiguration",
                    "s3:PutReplicationConfiguration", "s3:CreateBucket",
                    "s3:PutBucketCORS", "s3:PutInventoryConfiguration",
                    "s3:PutEncryptionConfiguration",
                    "s3:PutBucketNotification", "s3:DeleteBucketWebsite",
                    "s3:PutBucketWebsite", "s3:PutBucketRequestPayment",
                    "s3:PutBucketLogging", "s3:PutLifecycleConfiguration",
                    "s3:PutBucketObjectLockConfiguration",
                    "s3:DeleteBucket", "s3:PutBucketVersioning",
                    "s3:ReplicateObject", "s3:PutObject",
                    "s3:AbortMultipartUpload", "s3:PutObjectRetention",
                    "s3:DeleteObjectVersion", "s3:RestoreObject",
                    "s3:PutObjectLegalHold", "s3:DeleteObject",
                    "s3:ReplicateDelete"
                ],
                resources=[
                    "arn:aws:s3:::socialistir-prod",
                    "arn:aws:s3:::socialistir-prod/*"
                ])
        ])

    devgroup1 = iam.Group(self,
                          "Developer-socialistir",
                          group_name="Developer-socialistir",
                          managed_policies=[custom_allow_policy])
    devgroup2 = iam.Group(self,
                          "Developer-teamA",
                          group_name="Developer-teamA")

    ############ This section should be executed only once in lifetime as ############################################
    ############ AWS CDK does not support destion of S3 buckects yet and will ############################################
    ############ throw errors on subsequent deploy or destruct or rollback ############################################
    ############ S3 bucts are global across all AWS accounts ############################################
    # bucket = s3.Bucket(self, id='socialistir-prod', bucket_name='socialistir-prod', versioned=True, website_error_document='index.html', website_index_document='index.html')

    # Record write data events for the monitored bucket.
    trail = cloudtrail.Trail(self, "S3-Write-Operation-Trail")
    trail.add_s3_event_selector(
        ["arn:aws:s3:::socialistir-prod/"],
        include_management_events=True,
        read_write_type=cloudtrail.ReadWriteType.WRITE_ONLY)
    # ######################################################################################################################

    topic = sns.Topic(self,
                      "S3-Notification-Write",
                      topic_name="S3-Notification-Write")
    topic.add_subscription(subs.EmailSubscription('*****@*****.**'))

    # Event pattern: S3 object-level API calls (via CloudTrail) against
    # the monitored bucket.
    ep = {
        "source": ["aws.s3"],
        "detail": {
            "eventSource": ["s3.amazonaws.com"],
            "eventName": [
                "ListObjects", "ListObjectVersions", "PutObject",
                "GetObject", "HeadObject", "CopyObject", "GetObjectAcl",
                "PutObjectAcl", "CreateMultipartUpload", "ListParts",
                "UploadPart", "CompleteMultipartUpload",
                "AbortMultipartUpload", "UploadPartCopy", "RestoreObject",
                "DeleteObject", "DeleteObjects", "GetObjectTorrent",
                "SelectObjectContent", "PutObjectLockRetention",
                "PutObjectLockLegalHold", "GetObjectLockRetention",
                "GetObjectLockLegalHold"
            ],
            "requestParameters": {
                "bucketName": ["socialistir-prod"]
            }
        }
    }

    rule = events.Rule(self,
                       "Shub-s3",
                       description='Rule created by CDK for S3 monitoring',
                       enabled=True,
                       rule_name="Shub-s3",
                       event_pattern=ep)

    # Responder Lambda, loaded from the in_s3_01 module directory.
    lambda_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks",
                                   "in_s3_01")
    response_lambda = _lambda.Function(
        self,
        "S3WriteIR",
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler="lambda_function.lambda_handler",
        code=_lambda.Code.from_asset(lambda_dir_path),
        function_name="S3WriteIR")

    # FIX: this identical iam/organizations statement was previously added
    # twice in a row; one copy suffices.
    response_lambda.add_to_role_policy(
        iam.PolicyStatement(
            actions=[
                "iam:*", "organizations:DescribeAccount",
                "organizations:DescribeOrganization",
                "organizations:DescribeOrganizationalUnit",
                "organizations:DescribePolicy",
                "organizations:ListChildren",
                "organizations:ListParents",
                "organizations:ListPoliciesForTarget",
                "organizations:ListRoots", "organizations:ListPolicies",
                "organizations:ListTargetsForPolicy"
            ],
            effect=iam.Effect.ALLOW,
            resources=["*"],
        ))
    response_lambda.add_to_role_policy(
        iam.PolicyStatement(
            actions=["s3:*"],
            effect=iam.Effect.ALLOW,
            resources=["*"],
        ))
    response_lambda.add_to_role_policy(
        iam.PolicyStatement(
            actions=["sns:*"],
            effect=iam.Effect.ALLOW,
            resources=["*"],
        ))

    rule.add_target(event_target.LambdaFunction(response_lambda))
def __init__(self, scope: cdk.Construct, id: str, **kwargs) -> None:
    """No-Tears HPC quickstart stack.

    Provisions a VPC, data/CloudTrail buckets, a Cloud9 workspace
    bootstrapped (via SSM-backed custom resources) to run AWS
    ParallelCluster, supporting IAM roles/groups/users, and an optional
    annual cost budget — all parameterised through CfnParameters.
    """
    super().__init__(scope, id, **kwargs)

    # Version of ParallelCluster for Cloud9.
    pcluster_version = cdk.CfnParameter(
        self,
        'ParallelClusterVersion',
        description=
        'Specify a custom parallelcluster version. See https://pypi.org/project/aws-parallelcluster/#history for options.',
        default='2.8.0',
        type='String',
        allowed_values=get_version_list('aws-parallelcluster'))

    # S3 URI for Config file
    config = cdk.CfnParameter(
        self,
        'ConfigS3URI',
        description='Set a custom parallelcluster config file.',
        default=
        'https://notearshpc-quickstart.s3.amazonaws.com/{0}/config.ini'.
        format(__version__))

    # Password
    password = cdk.CfnParameter(
        self,
        'UserPasswordParameter',
        description='Set a password for the hpc-quickstart user',
        no_echo=True)

    # create a VPC
    vpc = ec2.Vpc(
        self,
        'VPC',
        cidr='10.0.0.0/16',
        gateway_endpoints={
            "S3":
            ec2.GatewayVpcEndpointOptions(
                service=ec2.GatewayVpcEndpointAwsService.S3),
            "DynamoDB":
            ec2.GatewayVpcEndpointOptions(
                service=ec2.GatewayVpcEndpointAwsService.DYNAMODB)
        },
        max_azs=99)

    # create a private and public subnet per vpc
    selection = vpc.select_subnets(subnet_type=ec2.SubnetType.PRIVATE)

    # Output created subnets
    for i, public_subnet in enumerate(vpc.public_subnets):
        cdk.CfnOutput(self,
                      'PublicSubnet%i' % i,
                      value=public_subnet.subnet_id)
    for i, private_subnet in enumerate(vpc.private_subnets):
        cdk.CfnOutput(self,
                      'PrivateSubnet%i' % i,
                      value=private_subnet.subnet_id)
    cdk.CfnOutput(self, 'VPCId', value=vpc.vpc_id)

    # Create a Bucket
    data_bucket = s3.Bucket(self, "DataRepository")
    cdk.CfnOutput(self, 'DataRespository', value=data_bucket.bucket_name)
    cloudtrail_bucket = s3.Bucket(self, "CloudTrailLogs")
    quickstart_bucket = s3.Bucket.from_bucket_name(
        self, 'QuickStartBucket', 'aws-quickstart')

    # Upload Bootstrap Script to that bucket
    bootstrap_script = assets.Asset(self,
                                    'BootstrapScript',
                                    path='scripts/bootstrap.sh')

    # Upload parallel cluster post_install_script to that bucket
    pcluster_post_install_script = assets.Asset(
        self,
        'PclusterPostInstallScript',
        path='scripts/post_install_script.sh')

    # Upload parallel cluster config to that bucket
    pcluster_config_script = assets.Asset(self,
                                          'PclusterConfigScript',
                                          path='scripts/config.ini')

    # Setup CloudTrail
    cloudtrail.Trail(self, 'CloudTrail', bucket=cloudtrail_bucket)

    # Create a Cloud9 instance
    # Cloud9 doesn't have the ability to provide userdata
    # Because of this we need to use SSM run command
    cloud9_instance = cloud9.Ec2Environment(
        self,
        'ResearchWorkspace',
        vpc=vpc,
        instance_type=ec2.InstanceType(
            instance_type_identifier='c5.large'))
    cdk.CfnOutput(self,
                  'Research Workspace URL',
                  value=cloud9_instance.ide_url)

    # Create a keypair in lambda and store the private key in SecretsManager
    c9_createkeypair_role = iam.Role(
        self,
        'Cloud9CreateKeypairRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
    c9_createkeypair_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'service-role/AWSLambdaBasicExecutionRole'))
    # Add IAM permissions to the lambda role
    c9_createkeypair_role.add_to_policy(
        iam.PolicyStatement(
            actions=['ec2:CreateKeyPair', 'ec2:DeleteKeyPair'],
            resources=['*'],
        ))

    # Lambda for Cloud9 keypair
    c9_createkeypair_lambda = _lambda.Function(
        self,
        'C9CreateKeyPairLambda',
        runtime=_lambda.Runtime.PYTHON_3_6,
        handler='lambda_function.handler',
        timeout=cdk.Duration.seconds(300),
        role=c9_createkeypair_role,
        code=_lambda.Code.asset('functions/source/c9keypair'),
    )

    c9_createkeypair_provider = cr.Provider(
        self,
        "C9CreateKeyPairProvider",
        on_event_handler=c9_createkeypair_lambda)

    c9_createkeypair_cr = cfn.CustomResource(
        self,
        "C9CreateKeyPair",
        provider=c9_createkeypair_provider,
        properties={'ServiceToken': c9_createkeypair_lambda.function_arn})
    #c9_createkeypair_cr.node.add_dependency(instance_id)
    c9_ssh_private_key_secret = secretsmanager.CfnSecret(
        self,
        'SshPrivateKeySecret',
        secret_string=c9_createkeypair_cr.get_att_string('PrivateKey'))

    # The iam policy has a <REGION> parameter that needs to be replaced.
    # We do it programmatically so future versions of the synth'd stack
    # template include all regions.
    with open('iam/ParallelClusterUserPolicy.json') as json_file:
        data = json.load(json_file)
    for s in data['Statement']:
        if s['Sid'] == 'S3ParallelClusterReadOnly':
            s['Resource'] = []
            for r in region_info.RegionInfo.regions:
                s['Resource'].append(
                    'arn:aws:s3:::{0}-aws-parallelcluster*'.format(r.name))

    parallelcluster_user_policy = iam.CfnManagedPolicy(
        self,
        'ParallelClusterUserPolicy',
        policy_document=iam.PolicyDocument.from_json(data))

    # Cloud9 IAM Role
    cloud9_role = iam.Role(
        self,
        'Cloud9Role',
        assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'))
    cloud9_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AmazonSSMManagedInstanceCore'))
    cloud9_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name('AWSCloud9User'))
    cloud9_role.add_managed_policy(
        iam.ManagedPolicy.from_managed_policy_arn(
            self, 'AttachParallelClusterUserPolicy',
            parallelcluster_user_policy.ref))
    cloud9_role.add_to_policy(
        iam.PolicyStatement(resources=['*'],
                            actions=[
                                'ec2:DescribeInstances',
                                'ec2:DescribeVolumes', 'ec2:ModifyVolume'
                            ]))
    cloud9_role.add_to_policy(
        iam.PolicyStatement(resources=[c9_ssh_private_key_secret.ref],
                            actions=['secretsmanager:GetSecretValue']))
    cloud9_role.add_to_policy(
        iam.PolicyStatement(
            actions=["s3:Get*", "s3:List*"],
            resources=[
                "arn:aws:s3:::%s/*" % (data_bucket.bucket_name),
                "arn:aws:s3:::%s" % (data_bucket.bucket_name)
            ]))
    bootstrap_script.grant_read(cloud9_role)
    pcluster_post_install_script.grant_read(cloud9_role)
    pcluster_config_script.grant_read(cloud9_role)

    # Admin Group
    admin_group = iam.Group(self, 'AdminGroup')
    admin_group.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AdministratorAccess'))
    admin_group.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AWSCloud9Administrator'))

    # PowerUser Group
    poweruser_group = iam.Group(self, 'PowerUserGroup')
    poweruser_group.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name('PowerUserAccess'))
    poweruser_group.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AWSCloud9Administrator'))

    # HPC User
    user = iam.CfnUser(
        self,
        'Researcher',
        groups=[admin_group.node.default_child.ref],
        login_profile=iam.CfnUser.LoginProfileProperty(
            password_reset_required=True,
            password=cdk.SecretValue.cfn_parameter(password).to_string()))

    # User creation is opt-in via the CreateUser parameter/condition.
    create_user = cdk.CfnParameter(self,
                                   "CreateUser",
                                   default="false",
                                   type="String",
                                   allowed_values=['true', 'false'
                                                   ]).value_as_string
    user_condition = cdk.CfnCondition(self,
                                      "UserCondition",
                                      expression=cdk.Fn.condition_equals(
                                          create_user, "true"))
    user.cfn_options.condition = user_condition

    cdk.CfnOutput(self,
                  'UserLoginUrl',
                  value="".join([
                      "https://", self.account,
                      ".signin.aws.amazon.com/console"
                  ]),
                  condition=user_condition)
    cdk.CfnOutput(self,
                  'UserName',
                  value=user.ref,
                  condition=user_condition)

    # Cloud9 Setup IAM Role
    cloud9_setup_role = iam.Role(
        self,
        'Cloud9SetupRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
    cloud9_setup_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'service-role/AWSLambdaBasicExecutionRole'))
    # Allow pcluster to be run in bootstrap
    cloud9_setup_role.add_managed_policy(
        iam.ManagedPolicy.from_managed_policy_arn(
            self, 'AttachParallelClusterUserPolicySetup',
            parallelcluster_user_policy.ref))
    # Add IAM permissions to the lambda role
    cloud9_setup_role.add_to_policy(
        iam.PolicyStatement(
            actions=[
                'cloudformation:DescribeStackResources',
                'ec2:AssociateIamInstanceProfile',
                'ec2:AuthorizeSecurityGroupIngress',
                'ec2:DescribeInstances',
                'ec2:DescribeInstanceStatus',
                'ec2:DescribeInstanceAttribute',
                'ec2:DescribeIamInstanceProfileAssociations',
                'ec2:DescribeVolumes',
                # FIX: was 'ec2:DesctibeVolumeAttribute' — the misspelled
                # action name never matches any EC2 API call.
                'ec2:DescribeVolumeAttribute',
                'ec2:DescribeVolumesModifications',
                'ec2:DescribeVolumeStatus',
                'ssm:DescribeInstanceInformation',
                'ec2:ModifyVolume',
                'ec2:ReplaceIamInstanceProfileAssociation',
                'ec2:ReportInstanceStatus',
                'ssm:SendCommand',
                'ssm:GetCommandInvocation',
                's3:GetObject',
                'lambda:AddPermission',
                'lambda:RemovePermission',
                'events:PutRule',
                'events:DeleteRule',
                'events:PutTargets',
                'events:RemoveTargets',
                'cloud9:CreateEnvironmentMembership',
            ],
            resources=['*'],
        ))
    cloud9_setup_role.add_to_policy(
        iam.PolicyStatement(actions=['iam:PassRole'],
                            resources=[cloud9_role.role_arn]))
    cloud9_setup_role.add_to_policy(
        iam.PolicyStatement(
            actions=['lambda:AddPermission', 'lambda:RemovePermission'],
            resources=['*']))

    # Cloud9 Instance Profile
    c9_instance_profile = iam.CfnInstanceProfile(
        self, "Cloud9InstanceProfile", roles=[cloud9_role.role_name])

    # Lambda to add Instance Profile to Cloud9
    c9_instance_profile_lambda = _lambda.Function(
        self,
        'C9InstanceProfileLambda',
        runtime=_lambda.Runtime.PYTHON_3_6,
        handler='lambda_function.handler',
        timeout=cdk.Duration.seconds(900),
        role=cloud9_setup_role,
        code=_lambda.Code.asset('functions/source/c9InstanceProfile'),
    )

    c9_instance_profile_provider = cr.Provider(
        self,
        "C9InstanceProfileProvider",
        on_event_handler=c9_instance_profile_lambda,
    )

    instance_id = cfn.CustomResource(
        self,
        "C9InstanceProfile",
        provider=c9_instance_profile_provider,
        properties={
            'InstanceProfile': c9_instance_profile.ref,
            'Cloud9Environment': cloud9_instance.environment_id,
        })
    instance_id.node.add_dependency(cloud9_instance)

    # Lambda for Cloud9 Bootstrap
    c9_bootstrap_lambda = _lambda.Function(
        self,
        'C9BootstrapLambda',
        runtime=_lambda.Runtime.PYTHON_3_6,
        handler='lambda_function.handler',
        timeout=cdk.Duration.seconds(900),
        role=cloud9_setup_role,
        code=_lambda.Code.asset('functions/source/c9bootstrap'),
    )

    c9_bootstrap_provider = cr.Provider(
        self, "C9BootstrapProvider", on_event_handler=c9_bootstrap_lambda)

    c9_bootstrap_cr = cfn.CustomResource(
        self,
        "C9Bootstrap",
        provider=c9_bootstrap_provider,
        properties={
            'Cloud9Environment':
            cloud9_instance.environment_id,
            'BootstrapPath':
            's3://%s/%s' % (bootstrap_script.s3_bucket_name,
                            bootstrap_script.s3_object_key),
            # FIX: pass the parameter's string token, not the CfnParameter
            # construct itself (consistent with 'PclusterVersion' below).
            'Config':
            config.value_as_string,
            'VPCID':
            vpc.vpc_id,
            'MasterSubnetID':
            vpc.public_subnets[0].subnet_id,
            'ComputeSubnetID':
            vpc.private_subnets[0].subnet_id,
            'PostInstallScriptS3Url':
            "".join([
                's3://', pcluster_post_install_script.s3_bucket_name, "/",
                pcluster_post_install_script.s3_object_key
            ]),
            'PostInstallScriptBucket':
            pcluster_post_install_script.s3_bucket_name,
            'S3ReadWriteResource':
            data_bucket.bucket_arn,
            'S3ReadWriteUrl':
            's3://%s' % (data_bucket.bucket_name),
            'KeyPairId':
            c9_createkeypair_cr.ref,
            'KeyPairSecretArn':
            c9_ssh_private_key_secret.ref,
            'UserArn':
            user.attr_arn,
            'PclusterVersion':
            pcluster_version.value_as_string
        })
    # Bootstrap must wait for the instance profile, keypair, secret and
    # data bucket to exist.
    c9_bootstrap_cr.node.add_dependency(instance_id)
    c9_bootstrap_cr.node.add_dependency(c9_createkeypair_cr)
    c9_bootstrap_cr.node.add_dependency(c9_ssh_private_key_secret)
    c9_bootstrap_cr.node.add_dependency(data_bucket)

    enable_budget = cdk.CfnParameter(self,
                                     "EnableBudget",
                                     default="true",
                                     type="String",
                                     allowed_values=['true', 'false'
                                                     ]).value_as_string

    # Budgets
    budget_properties = {
        'budgetType': "COST",
        'timeUnit': "ANNUALLY",
        'budgetLimit': {
            'amount':
            cdk.CfnParameter(
                self,
                'BudgetLimit',
                description=
                'The initial budget for this project in USD ($).',
                default=2000,
                type='Number').value_as_number,
            'unit':
            "USD",
        },
        'costFilters': None,
        'costTypes': {
            'includeCredit': False,
            'includeDiscount': True,
            'includeOtherSubscription': True,
            'includeRecurring': True,
            'includeRefund': True,
            'includeSubscription': True,
            'includeSupport': True,
            'includeTax': True,
            'includeUpfront': True,
            'useAmortized': False,
            'useBlended': False,
        },
        'plannedBudgetLimits': None,
        'timePeriod': None,
    }

    # Email notification at 80% of actual spend.
    email = {
        'notification': {
            'comparisonOperator': "GREATER_THAN",
            'notificationType': "ACTUAL",
            'threshold': 80,
            'thresholdType': "PERCENTAGE",
        },
        'subscribers': [{
            'address':
            cdk.CfnParameter(
                self,
                'NotificationEmail',
                description=
                'This email address will receive billing alarm notifications when 80% of the budget limit is reached.',
                default='*****@*****.**').value_as_string,
            'subscriptionType':
            "EMAIL",
        }]
    }

    overall_budget = budgets.CfnBudget(
        self,
        "HPCBudget",
        budget=budget_properties,
        notifications_with_subscribers=[email],
    )
    overall_budget.cfn_options.condition = cdk.CfnCondition(
        self,
        "BudgetCondition",
        expression=cdk.Fn.condition_equals(enable_budget, "true"))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Provision single-account CIS AWS Foundations Benchmark controls.

    Sets up:
      * CIS 2.1-2.8 - multi-region CloudTrail with file validation, a
        KMS CMK, S3 buckets (with access logging) and an AWS Config
        recorder + delivery channel.
      * CIS 3.1-3.14 - CloudWatch Logs metric filters and alarms wired
        to an SNS topic.
      * CIS 1.5-1.11 - IAM password policy via an included template.
      * CIS 1.20 - a role with AWSSupportAccess.
      * GuardDuty detector plus a finding-notification rule.
    """
    super().__init__(scope, id, **kwargs)

    # Recipient of all CIS alarm / GuardDuty notifications.
    security_distribution_list_email = '*****@*****.**'

    # securityhub_instance = securityhub.CfnHub(self, 'SecurityHub')

    # Ensure AWS Config is enabled / Ensure CloudTrail is enabled in all
    # Regions 2.1 - 2.8
    # Access-log bucket must exist before the trail bucket references it.
    cloudtrail_bucket_accesslogs = s3.Bucket(
        self,
        "CloudTrailS3Accesslogs",
        block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        encryption=s3.BucketEncryption.S3_MANAGED,
        removal_policy=core.RemovalPolicy.RETAIN)

    cloudtrail_bucket = s3.Bucket(
        self,
        "CloudTrailS3",
        block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        encryption=s3.BucketEncryption.S3_MANAGED,
        removal_policy=core.RemovalPolicy.RETAIN,
        server_access_logs_bucket=cloudtrail_bucket_accesslogs,
    )

    # CMK used to encrypt the trail's log files (CIS 2.7).
    cloudtrail_kms = kms.Key(self, "CloudTrailKey", enable_key_rotation=True)

    # CloudTrail - single account, not Organization
    trail = cloudtrail.Trail(
        self,
        "CloudTrail",
        enable_file_validation=True,
        is_multi_region_trail=True,
        include_global_service_events=True,
        send_to_cloud_watch_logs=True,
        cloud_watch_logs_retention=logs.RetentionDays.FOUR_MONTHS,
        bucket=cloudtrail_bucket,
        kms_key=cloudtrail_kms)

    # Key-policy grants for the CloudTrail service principal: it must be
    # able to describe the key and generate data keys, but only for
    # encryption contexts belonging to trails in this account.
    cloudtrail_kms.grant(iam.ServicePrincipal('cloudtrail.amazonaws.com'),
                         'kms:DescribeKey')
    cloudtrail_kms.grant(
        iam.ServicePrincipal(
            'cloudtrail.amazonaws.com',
            conditions={
                'StringLike': {
                    'kms:EncryptionContext:aws:cloudtrail:arn':
                    'arn:aws:cloudtrail:*:' + core.Stack.of(self).account +
                    ':trail/*'
                }
            }), 'kms:GenerateDataKey*')

    # Let any principal in this account decrypt the CloudTrail log files.
    # NOTE(review): the original added this exact statement twice; the
    # duplicate has been removed — a single statement yields the same
    # effective key policy.
    cloudtrail_kms.add_to_resource_policy(
        iam.PolicyStatement(
            actions=["kms:Decrypt", "kms:ReEncryptFrom"],
            conditions={
                'StringEquals': {
                    'kms:CallerAccount': core.Stack.of(self).account
                },
                'StringLike': {
                    'kms:EncryptionContext:aws:cloudtrail:arn':
                    'arn:aws:cloudtrail:*:' + core.Stack.of(self).account +
                    ':trail/*'
                }
            },
            effect=iam.Effect.ALLOW,
            principals=[iam.AnyPrincipal()],
            resources=['*']))

    # Allow alias creation from this account when the request arrives via
    # the regional EC2 service endpoint.
    cloudtrail_kms.add_to_resource_policy(
        iam.PolicyStatement(
            actions=["kms:CreateAlias"],
            conditions={
                'StringEquals': {
                    'kms:CallerAccount': core.Stack.of(self).account,
                    'kms:ViaService':
                    'ec2.' + core.Stack.of(self).region + '.amazonaws.com'
                }
            },
            effect=iam.Effect.ALLOW,
            principals=[iam.AnyPrincipal()],
            resources=['*']))

    # Service-linked role for AWS Config; the recorder below references
    # its well-known ARN directly (the CFN attribute is not exposed).
    config_role = iam.CfnServiceLinkedRole(
        self,
        id='ServiceLinkedRoleConfig',
        aws_service_name='config.amazonaws.com')

    global_config = config.CfnConfigurationRecorder(
        self,
        'ConfigRecorder',
        name='default',
        # role_arn=config_role.role_arn,
        role_arn="arn:aws:iam::" + core.Stack.of(self).account +
        ":role/aws-service-role/config.amazonaws.com/AWSServiceRoleForConfig",
        # role_arn=config_role.get_att(
        #     attribute_name='resource.arn').to_string(),
        recording_group=config.CfnConfigurationRecorder.RecordingGroupProperty(
            all_supported=True,
            include_global_resource_types=True))

    # Delivery bucket for AWS Config snapshots/history.
    config_bucket = s3.Bucket(
        self,
        "ConfigS3",
        block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        encryption=s3.BucketEncryption.S3_MANAGED,
        removal_policy=core.RemovalPolicy.RETAIN,
    )

    # Standard AWS Config delivery-bucket policy: ACL check plus writes
    # restricted to the AWSLogs/<account>/Config/ prefix with
    # bucket-owner-full-control.
    config_bucket.add_to_resource_policy(
        iam.PolicyStatement(
            actions=['s3:GetBucketAcl'],
            effect=iam.Effect.ALLOW,
            principals=[iam.ServicePrincipal('config.amazonaws.com')],
            resources=[config_bucket.bucket_arn]))

    config_bucket.add_to_resource_policy(
        iam.PolicyStatement(
            actions=['s3:PutObject'],
            effect=iam.Effect.ALLOW,
            principals=[iam.ServicePrincipal('config.amazonaws.com')],
            resources=[
                config_bucket.arn_for_objects('AWSLogs/' +
                                              core.Stack.of(self).account +
                                              '/Config/*')
            ],
            conditions={
                "StringEquals": {
                    's3:x-amz-acl': 'bucket-owner-full-control',
                }
            }))

    config_delivery_stream = config.CfnDeliveryChannel(
        self,
        "ConfigDeliveryChannel",
        s3_bucket_name=config_bucket.bucket_name)

    # Config Aggregator in Organizations account
    # config_aggregator = config.CfnConfigurationAggregator(self, 'ConfigAggregator',
    #     configuration_aggregator_name='ConfigAggregator',
    #     organization_aggregation_source=config.CfnConfigurationAggregator.OrganizationAggregationSourceProperty(
    #         role_arn=iam.Role(self, "AWSConfigRoleForOrganizations",
    #             assumed_by=iam.ServicePrincipal('config.amazonaws.com'),
    #             managed_policies=[iam.ManagedPolicy.from_aws_managed_policy_name(
    #                 'service-role/AWSConfigRoleForOrganizations')]
    #         ).role_arn,
    #         all_aws_regions=True
    #     )
    # )

    # 2.9 - Ensure VPC flow logging is enabled in all VPCs
    # vpc = ec2.Vpc.from_lookup(self, "VPC",
    #     is_default=True,
    # )
    # S3 for VPC flow logs
    # vpc_flow_logs_bucket = s3.Bucket(self, "VPCFlowLogsBucket",
    #     block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
    #     encryption=s3.BucketEncryption.S3_MANAGED,
    #     removal_policy=core.RemovalPolicy.RETAIN
    # )

    # Ensure a log metric filter and alarm exist for 3.1 - 3.14
    security_notifications_topic = sns.Topic(self,
                                             'CIS_Topic',
                                             display_name='CIS_Topic',
                                             topic_name='CIS_Topic')
    sns.Subscription(self,
                     'CIS_Subscription',
                     topic=security_notifications_topic,
                     protocol=sns.SubscriptionProtocol.EMAIL,
                     endpoint=security_distribution_list_email)

    cloudwatch_actions_cis = cloudwatch_actions.SnsAction(
        security_notifications_topic)

    # Map of alarm name -> CloudWatch Logs filter pattern.  Note the
    # 'RootAccountUsageAlarm' key (CIS 3.3) intentionally keeps its
    # legacy name; renaming it would replace the deployed alarm.
    cis_metricfilter_alarms = {
        'CIS-3.1-UnauthorizedAPICalls':
        '($.errorCode="*UnauthorizedOperation") || ($.errorCode="AccessDenied*")',
        'CIS-3.2-ConsoleSigninWithoutMFA':
        '($.eventName="ConsoleLogin") && ($.additionalEventData.MFAUsed !="Yes")',
        'RootAccountUsageAlarm':
        '$.userIdentity.type="Root" && $.userIdentity.invokedBy NOT EXISTS && $.eventType !="AwsServiceEvent"',
        'CIS-3.4-IAMPolicyChanges':
        '($.eventName=DeleteGroupPolicy) || ($.eventName=DeleteRolePolicy) || ($.eventName=DeleteUserPolicy) || ($.eventName=PutGroupPolicy) || ($.eventName=PutRolePolicy) || ($.eventName=PutUserPolicy) || ($.eventName=CreatePolicy) || ($.eventName=DeletePolicy) || ($.eventName=CreatePolicyVersion) || ($.eventName=DeletePolicyVersion) || ($.eventName=AttachRolePolicy) || ($.eventName=DetachRolePolicy) || ($.eventName=AttachUserPolicy) || ($.eventName=DetachUserPolicy) || ($.eventName=AttachGroupPolicy) || ($.eventName=DetachGroupPolicy)',
        'CIS-3.5-CloudTrailChanges':
        '($.eventName=CreateTrail) || ($.eventName=UpdateTrail) || ($.eventName=DeleteTrail) || ($.eventName=StartLogging) || ($.eventName=StopLogging)',
        'CIS-3.6-ConsoleAuthenticationFailure':
        '($.eventName=ConsoleLogin) && ($.errorMessage="Failed authentication")',
        'CIS-3.7-DisableOrDeleteCMK':
        '($.eventSource=kms.amazonaws.com) && (($.eventName=DisableKey) || ($.eventName=ScheduleKeyDeletion))',
        'CIS-3.8-S3BucketPolicyChanges':
        '($.eventSource=s3.amazonaws.com) && (($.eventName=PutBucketAcl) || ($.eventName=PutBucketPolicy) || ($.eventName=PutBucketCors) || ($.eventName=PutBucketLifecycle) || ($.eventName=PutBucketReplication) || ($.eventName=DeleteBucketPolicy) || ($.eventName=DeleteBucketCors) || ($.eventName=DeleteBucketLifecycle) || ($.eventName=DeleteBucketReplication))',
        'CIS-3.9-AWSConfigChanges':
        '($.eventSource=config.amazonaws.com) && (($.eventName=StopConfigurationRecorder) || ($.eventName=DeleteDeliveryChannel) || ($.eventName=PutDeliveryChannel) || ($.eventName=PutConfigurationRecorder))',
        'CIS-3.10-SecurityGroupChanges':
        '($.eventName=AuthorizeSecurityGroupIngress) || ($.eventName=AuthorizeSecurityGroupEgress) || ($.eventName=RevokeSecurityGroupIngress) || ($.eventName=RevokeSecurityGroupEgress) || ($.eventName=CreateSecurityGroup) || ($.eventName=DeleteSecurityGroup)',
        'CIS-3.11-NetworkACLChanges':
        '($.eventName=CreateNetworkAcl) || ($.eventName=CreateNetworkAclEntry) || ($.eventName=DeleteNetworkAcl) || ($.eventName=DeleteNetworkAclEntry) || ($.eventName=ReplaceNetworkAclEntry) || ($.eventName=ReplaceNetworkAclAssociation)',
        'CIS-3.12-NetworkGatewayChanges':
        '($.eventName=CreateCustomerGateway) || ($.eventName=DeleteCustomerGateway) || ($.eventName=AttachInternetGateway) || ($.eventName=CreateInternetGateway) || ($.eventName=DeleteInternetGateway) || ($.eventName=DetachInternetGateway)',
        'CIS-3.13-RouteTableChanges':
        '($.eventName=CreateRoute) || ($.eventName=CreateRouteTable) || ($.eventName=ReplaceRoute) || ($.eventName=ReplaceRouteTableAssociation) || ($.eventName=DeleteRouteTable) || ($.eventName=DeleteRoute) || ($.eventName=DisassociateRouteTable)',
        'CIS-3.14-VPCChanges':
        '($.eventName=CreateVpc) || ($.eventName=DeleteVpc) || ($.eventName=ModifyVpcAttribute) || ($.eventName=AcceptVpcPeeringConnection) || ($.eventName=CreateVpcPeeringConnection) || ($.eventName=DeleteVpcPeeringConnection) || ($.eventName=RejectVpcPeeringConnection) || ($.eventName=AttachClassicLinkVpc) || ($.eventName=DetachClassicLinkVpc) || ($.eventName=DisableVpcClassicLink) || ($.eventName=EnableVpcClassicLink)',
    }

    # One metric filter + alarm per CIS control, all notifying the CIS
    # SNS topic.  (Keys and values are already strings, so the original
    # str() round-trips were dropped.)
    for alarm_name, pattern in cis_metricfilter_alarms.items():
        logs.MetricFilter(
            self,
            "MetricFilter_" + alarm_name,
            log_group=trail.log_group,
            filter_pattern=logs.JsonPattern(json_pattern_string=pattern),
            metric_name=alarm_name,
            metric_namespace="LogMetrics",
            metric_value='1')
        cloudwatch.Alarm(
            self,
            "Alarm_" + alarm_name,
            alarm_name=alarm_name,
            alarm_description=alarm_name,
            statistic='Sum',
            period=core.Duration.minutes(5),
            comparison_operator=cloudwatch.ComparisonOperator.
            GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
            evaluation_periods=1,
            threshold=1,
            metric=cloudwatch.Metric(metric_name=alarm_name,
                                     namespace="LogMetrics"),
        ).add_alarm_action(cloudwatch_actions_cis)

    # IAM Password Policy custom resource CIS 1.5 - 1.11
    cfn_template = cfn_inc.CfnInclude(
        self,
        "includeTemplate",
        template_file="account-password-policy.yaml",
        parameters={
            "MaxPasswordAge": 90,
            "MinimumPasswordLength": 14,
            "PasswordReusePrevention": 24,
            "RequireLowercaseCharacters": True,
            "RequireNumbers": True,
            "RequireSymbols": True,
            "RequireUppercaseCharacters": True,
        })

    # CIS 1.20 - role through which AWS Support can be engaged.
    support_role = iam.Role(
        self,
        "SupportRole",
        assumed_by=iam.AccountPrincipal(
            account_id=core.Stack.of(self).account),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AWSSupportAccess')
        ],
        role_name='AWSSupportAccess')

    # GuardDuty detector plus an EventBridge rule forwarding findings to
    # the security notification topic.
    guardduty_detector = guardduty.CfnDetector(self,
                                               'GuardDutyDetector',
                                               enable=True)
    guardduty_event = events.Rule(
        self,
        'GuardDutyEvent',
        rule_name='guardduty-notification',
        description='GuardDuty Notification',
        event_pattern=events.EventPattern(
            source=['aws.guardduty'],
            detail_type=['GuardDuty Finding']),
        targets=[events_targets.SnsTopic(security_notifications_topic)])
def __init__(self, app: core.App, id: str) -> None:
    """Deploy AWS Config with an SSH-restriction rule and auto-remediation.

    Wires together: a Config recorder + delivery channel, the
    INCOMING_SSH_DISABLED managed rule, and an EventBridge-triggered
    Lambda (loaded from ./lambda.py) that revokes offending security
    group ingress when the rule reports NON_COMPLIANT.
    """
    super().__init__(app, id)
    # Setting up a role to represent config service principal
    aws_role = iam.Role(
        self,
        'ConfigRole',
        assumed_by=iam.ServicePrincipal('config.amazonaws.com')
    )
    # Adding a managed policy to the above role
    aws_role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSConfigRole"))
    # Setting up ConfigurationRecorder for AWS Config
    aws_config_recorder = config.CfnConfigurationRecorder(
        self,
        'ConfigRecorder',
        role_arn=aws_role.role_arn,
        recording_group={"allSupported": True}
    )
    # Setting up the S3 bucket for Config to deliver the changes
    aws_config_bucket = s3.Bucket(self, 'ConfigBucket')
    # Adding policies to the S3 bucket: Config needs the bucket ACL /
    # listing before it will deliver objects.
    aws_config_bucket.add_to_resource_policy(iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        principals=[aws_role],
        resources=[aws_config_bucket.bucket_arn],
        actions=["s3:GetBucketAcl", "s3:ListBucket"]
    ))
    # Writes are limited to the standard AWSLogs/<account>/Config/ prefix
    # and must grant the bucket owner full control.
    cst_resource = 'AWSLogs/' + core.Stack.of(self).account + '/Config/*'
    aws_config_bucket.add_to_resource_policy(iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        principals=[aws_role],
        resources=[aws_config_bucket.arn_for_objects(cst_resource)],
        actions=["s3:PutObject"],
        conditions={"StringEquals": {
            "s3:x-amz-acl": "bucket-owner-full-control"}}
    ))
    # Creating the delivery channel for Config
    config.CfnDeliveryChannel(
        self,
        'ConfigDeliveryChannel',
        s3_bucket_name=aws_config_bucket.bucket_name
    )
    # Create CloudTrail trail
    trail.Trail(self, 'Trail')
    # Create Config managed rule flagging security groups that allow
    # inbound SSH from the internet.
    aws_config_managed_rule = config.ManagedRule(
        self,
        "restricted-ssh",
        identifier=config.ManagedRuleIdentifiers.EC2_SECURITY_GROUPS_INCOMING_SSH_DISABLED
    )
    # You cant create a rule if recorder is not enabled
    aws_config_managed_rule.node.add_dependency(aws_config_recorder)
    # Event pattern triggered by change in the AWS Config compliance rule.
    # Kept as a JSON string and parsed because events.EventPattern.detail
    # expects a dict.
    dtl = """{
        "requestParameters": {
            "evaluations": {
                "complianceType": [
                    "NON_COMPLIANT"
                ]
            }
        },
        "additionalEventData": {
            "managedRuleIdentifier": [
                "INCOMING_SSH_DISABLED"
            ]
        }
    }"""
    # detail needs to be a JSON object
    detail = json.loads(dtl)
    # Create an eventbridge rule to be triggered by AWS Config
    aws_event_rule = events.Rule(
        self,
        "Rule",
        description='rule that triggers a lambda function to revoke SSH public access directly after AWS Config NON COMFORM event',
        event_pattern=events.EventPattern(
            detail=detail,
            source=["aws.config"]
        )
    )
    # Create role for the lambda function
    aws_lambda_se_group_role = iam.Role(
        self,
        'aws_lambda_security_group_role',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole")
        ])
    # Add policy to Lambda role.  NOTE(review): resources=["*"] is broad;
    # ec2:RevokeSecurityGroupIngress could be scoped further if the
    # target security groups are known.
    aws_lambda_se_group_role.add_to_policy(iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=["*"],
        actions=["ec2:RevokeSecurityGroupIngress",
                 "config:GetComplianceDetailsByConfigRule",
                 "sts:GetCallerIdentity",
                 "ec2:DescribeSecurityGroups"]))
    # Create lambda function and pass it the above role; the handler code
    # is inlined from lambda.py at synth time.
    with open("lambda.py", encoding="utf8") as fp:
        handler_code = fp.read()
    aws_lambda_fn = lambda_.Function(
        self,
        "revoke-ssh-access",
        role=aws_lambda_se_group_role,
        code=lambda_.InlineCode(handler_code),
        handler="index.lambda_handler",
        timeout=core.Duration.seconds(300),
        runtime=lambda_.Runtime.PYTHON_3_7,
    )
    # Add environment variable so the handler can look up compliance
    # details for this specific rule.
    aws_lambda_fn.add_environment("SSH_RULE_NAME", aws_config_managed_rule.config_rule_name)
    # Adding the lambda function as a target of the rule
    aws_event_rule.add_target(targets.LambdaFunction(aws_lambda_fn))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Monitor Route53 record changes and keep a Routefile export in S3.

    A CloudTrail-backed EventBridge rule fires on
    ChangeResourceRecordSets for the hosted zone; it triggers a
    diff-notice Lambda and a CodeBuild project that exports the zone
    with roadworker to s3://<bucket>/Routefile.

    Assumes module-level constants ID, HOSTED_ZONE_NAME and
    HOSTED_ZONE_ID are defined elsewhere in this file — TODO confirm.
    """
    super().__init__(scope, id, **kwargs)
    # Versioned bucket holding the exported Routefile; old versions
    # expire after 30 days.
    mybucket = s3.Bucket(
        self,
        ID + '-bucket',
        encryption=s3.BucketEncryption.S3_MANAGED,
        versioned=True,
        lifecycle_rules=[
            s3.LifecycleRule(
                enabled=True,
                noncurrent_version_expiration=core.Duration.days(30),
            )
        ])
    # Multi-region trail so Route53 (global service) API calls reach
    # EventBridge via CloudWatch Logs.
    cloudtrail.Trail(
        self,
        ID + '-trail',
        enable_file_validation=True,
        include_global_service_events=True,
        is_multi_region_trail=True,
        send_to_cloud_watch_logs=True,
        cloud_watch_logs_retention=logs.RetentionDays.ONE_WEEK,
    )
    # Lambda notifying about record-set diffs (handler app.diff_notice).
    func_diff_notice = lambda_.Function(
        self,
        ID + '-func_diff_notice',
        code=lambda_.Code.asset('./functions/artifacts/'),
        handler='app.diff_notice',
        runtime=lambda_.Runtime.PYTHON_3_7,
        log_retention=logs.RetentionDays.ONE_MONTH,
        memory_size=128,
        timeout=core.Duration.seconds(60),
        tracing=lambda_.Tracing.ACTIVE)
    # Both functions read configuration from SSM Parameter Store.
    func_diff_notice.add_to_role_policy(
        iam.PolicyStatement(
            actions=['ssm:GetParameter'],
            resources=['*'],
        ))
    # Lambda alerting on CodeBuild failures (handler app.codebuild_alert).
    func_codebuild_alert = lambda_.Function(
        self,
        ID + '-func_codebuild_alert',
        code=lambda_.Code.asset('./functions/artifacts/'),
        handler='app.codebuild_alert',
        runtime=lambda_.Runtime.PYTHON_3_7,
        log_retention=logs.RetentionDays.ONE_MONTH,
        memory_size=128,
        timeout=core.Duration.seconds(60),
        tracing=lambda_.Tracing.ACTIVE)
    func_codebuild_alert.add_to_role_policy(
        iam.PolicyStatement(
            actions=['ssm:GetParameter'],
            resources=['*'],
        ))
    # CodeBuild project: installs roadworker, exports the hosted zone to
    # a Routefile and uploads it to the bucket above.
    codebuild_project = codebuild.Project(
        self,
        ID + '-codebuild-project',
        environment=codebuild.BuildEnvironment(
            build_image=codebuild.LinuxBuildImage.STANDARD_2_0,
            compute_type=codebuild.ComputeType.SMALL),
        build_spec=codebuild.BuildSpec.from_object({
            'version': 0.2,
            'phases': {
                'install': {
                    'runtime-versions': {
                        'ruby': 2.6
                    },
                    'commands': ['gem install roadworker']
                },
                'build': {
                    'commands': [
                        'roadwork --export --target-zone ' +
                        HOSTED_ZONE_NAME + ' --output Routefile',
                        'aws s3 cp Routefile s3://' + mybucket.bucket_name +
                        '/Routefile'
                    ]
                }
            }
        }))
    # Build role may write only the Routefile object, and read Route53.
    codebuild_project.add_to_role_policy(
        iam.PolicyStatement(
            actions=['s3:putObject'],
            resources=[mybucket.bucket_arn + '/Routefile'],
        ))
    codebuild_project.add_to_role_policy(
        iam.PolicyStatement(
            actions=['route53:List*', 'route53:Get*'],
            resources=['*'],
        ))
    # Failed exports page the alert Lambda.
    codebuild_project.on_build_failed(
        ID + '-rule-on_build_failed',
        target=targets.LambdaFunction(func_codebuild_alert))
    # EventBridge rule matching ChangeResourceRecordSets calls (via
    # CloudTrail) for this hosted zone only.
    rule = events.Rule(self, ID + '-rule', enabled=True)
    rule.add_event_pattern(
        source=['aws.route53'],
        detail_type=['AWS API Call via CloudTrail'],
        detail={
            'eventSource': ['route53.amazonaws.com'],
            'eventName': ['ChangeResourceRecordSets'],
            'requestParameters': {
                'hostedZoneId': [HOSTED_ZONE_ID]
            },
        },
    )
    rule.add_target(targets.LambdaFunction(func_diff_notice))
    rule.add_target(targets.CodeBuildProject(codebuild_project))