def _add_cloudtrail_listener(
    builder: Template,
    replication_bucket: Parameter,
    log_bucket: s3.Bucket,
    log_bucket_policy: s3.BucketPolicy,
) -> cloudtrail.Trail:
    """Attach a single-region CloudTrail trail that records S3 data events
    for artifact manifests written to the replication bucket.

    :param builder: template the trail resource is added to
    :param replication_bucket: parameter naming the bucket whose manifest
        prefix is watched for object-level (data) events
    :param log_bucket: bucket that receives the CloudTrail log files
    :param log_bucket_policy: bucket policy the trail must wait on
        (CloudTrail cannot deliver until the policy exists)
    :return: the trail resource as added to ``builder``
    """
    # Only object-level events under the manifest prefix are captured;
    # management events are explicitly excluded.
    manifest_arn = Sub(f"arn:${{{AWS_PARTITION}}}:s3:::${{{replication_bucket.title}}}/{ARTIFACT_MANIFESTS_PREFIX}")
    manifest_selector = cloudtrail.EventSelector(
        IncludeManagementEvents=False,
        DataResources=[
            cloudtrail.DataResource(Type="AWS::S3::Object", Values=[manifest_arn])
        ],
    )
    listener_trail = cloudtrail.Trail(
        "ReplicationListenerTrail",
        # Delivery fails unless the log bucket policy is in place first.
        DependsOn=[log_bucket_policy.title],
        EnableLogFileValidation=True,
        EventSelectors=[manifest_selector],
        IncludeGlobalServiceEvents=False,
        IsLogging=True,
        IsMultiRegionTrail=False,
        S3BucketName=log_bucket.ref(),
        S3KeyPrefix="accretion/cloudtrail/",
        Tags=DEFAULT_TAGS,
    )
    return builder.add_resource(listener_trail)
def cloudtrail_adder(self, name, bucket, cw_group=Ref("AWS::NoValue"), cw_role_arn=Ref("AWS::NoValue")):
    """Create a CloudTrail trail resource and register it on this template.

    :param name: logical resource name, also used as the TrailName
    :param bucket: S3 bucket name (or Ref) that receives the trail's logs
    :param cw_group: optional CloudWatch Logs log group ARN; defaults to
        AWS::NoValue so the property is omitted from the template
    :param cw_role_arn: optional IAM role ARN for CloudWatch Logs delivery;
        also defaults to AWS::NoValue
    :return: the trail resource that was added
    """
    # Logs are keyed by account id so multiple accounts can share a bucket.
    new_trail = cloudtrail.Trail(
        name,
        TrailName=name,
        IsLogging=True,
        S3BucketName=bucket,
        S3KeyPrefix=Ref("AWS::AccountId"),
        CloudWatchLogsLogGroupArn=cw_group,
        CloudWatchLogsRoleArn=cw_role_arn,
    )
    self.template.add_resource(new_trail)
    return new_trail
def buildInfrastructure(t, args):
    """Add core infrastructure to template ``t``: a VPC with one public
    subnet and internet routing, a KMS key, an S3 bucket with a policy
    allowing CloudTrail delivery, and a multi-region CloudTrail trail.

    :param t: troposphere Template to populate
    :param args: parsed CLI arguments (not read in this variant)
    :return: the same template, for chaining
    """
    # --- Networking: VPC, public subnet, IGW, and a default route out ---
    t.add_resource(
        ec2.VPC('VPC',
                CidrBlock='10.0.0.0/16',
                EnableDnsSupport='true',
                EnableDnsHostnames='true'))

    t.add_resource(
        ec2.Subnet('PublicSubnet1',
                   VpcId=Ref('VPC'),
                   CidrBlock='10.0.1.0/24',
                   # First AZ of the deployment region.
                   AvailabilityZone=Select("0", GetAZs(""))))

    t.add_resource(ec2.InternetGateway('ig'))

    t.add_resource(
        ec2.VPCGatewayAttachment('igAttach',
                                 VpcId=Ref('VPC'),
                                 InternetGatewayId=Ref('ig')))

    t.add_resource(ec2.RouteTable('rtTablePublic', VpcId=Ref('VPC')))

    t.add_resource(
        ec2.Route('rtPublic',
                  RouteTableId=Ref('rtTablePublic'),
                  DestinationCidrBlock='0.0.0.0/0',
                  GatewayId=Ref('ig'),
                  # The route cannot be created before the IGW is attached.
                  DependsOn='igAttach'))

    t.add_resource(
        ec2.SubnetRouteTableAssociation('rtPublic1Attach',
                                        SubnetId=Ref('PublicSubnet1'),
                                        RouteTableId=Ref('rtTablePublic')))

    # --- KMS key; policy grants full key control to the account root ---
    t.add_resource(
        kms.Key('OpenEMRKey',
                DeletionPolicy='Delete',
                KeyPolicy={
                    "Version": "2012-10-17",
                    "Id": "key-default-1",
                    "Statement": [{
                        "Sid": "1",
                        "Effect": "Allow",
                        "Principal": {
                            # Join produces arn:aws:iam::<account>:root
                            # (ref_account is a module-level value).
                            "AWS": [Join(':', ['arn:aws:iam:', ref_account, 'root'])]
                        },
                        "Action": "kms:*",
                        "Resource": "*"
                    }]
                }))

    # --- S3 bucket; name suffixed with a stack-id fragment for uniqueness ---
    t.add_resource(
        s3.Bucket(
            'S3Bucket',
            DeletionPolicy='Retain',
            BucketName=Join(
                '-', ['openemr', Select('2', Split('/', ref_stack_id))])))

    # Standard CloudTrail delivery policy: ACL check plus PutObject under
    # /AWSLogs/<account>/ with bucket-owner-full-control required.
    t.add_resource(
        s3.BucketPolicy(
            'BucketPolicy',
            Bucket=Ref('S3Bucket'),
            PolicyDocument={
                "Version": "2012-10-17",
                "Statement": [{
                    "Sid": "AWSCloudTrailAclCheck",
                    "Effect": "Allow",
                    "Principal": {
                        "Service": "cloudtrail.amazonaws.com"
                    },
                    "Action": "s3:GetBucketAcl",
                    "Resource": {
                        "Fn::Join": ["", ["arn:aws:s3:::", {
                            "Ref": "S3Bucket"
                        }]]
                    }
                }, {
                    "Sid": "AWSCloudTrailWrite",
                    "Effect": "Allow",
                    "Principal": {
                        "Service": "cloudtrail.amazonaws.com"
                    },
                    "Action": "s3:PutObject",
                    "Resource": {
                        "Fn::Join": [
                            "",
                            [
                                "arn:aws:s3:::", {
                                    "Ref": "S3Bucket"
                                }, "/AWSLogs/", {
                                    "Ref": "AWS::AccountId"
                                }, "/*"
                            ]
                        ]
                    },
                    "Condition": {
                        "StringEquals": {
                            "s3:x-amz-acl": "bucket-owner-full-control"
                        }
                    }
                }]
            }))

    # --- CloudTrail trail; must wait for the bucket policy to exist ---
    t.add_resource(
        cloudtrail.Trail('CloudTrail',
                         DependsOn='BucketPolicy',
                         IsLogging=True,
                         IncludeGlobalServiceEvents=True,
                         IsMultiRegionTrail=True,
                         S3BucketName=Ref('S3Bucket')))
    return t
def buildInfrastructure(t, args):
    """Add recovery-aware infrastructure to template ``t``: a KMS key
    (skipped in recovery mode), an S3 bucket with CloudTrail delivery
    policy, a multi-region CloudTrail trail, and an application security
    group.

    :param t: troposphere Template to populate
    :param args: parsed CLI arguments; ``recovery`` and ``dev`` flags are read
    :return: the same template, for chaining
    """
    # Key is only created outside recovery mode (recovery presumably reuses
    # an existing key -- TODO confirm against the recovery code path).
    if (not args.recovery):
        t.add_resource(
            kms.Key(
                'OpenEMRKey',
                # NOTE(review): inside this branch args.recovery is always
                # False, so the first 'Retain' arm is dead code; this reduces
                # to 'Delete' if args.dev else 'Retain'.
                DeletionPolicy='Retain'
                if args.recovery else 'Delete' if args.dev else 'Retain',
                KeyPolicy={
                    "Version": "2012-10-17",
                    "Id": "key-default-1",
                    "Statement": [{
                        "Sid": "1",
                        "Effect": "Allow",
                        "Principal": {
                            # Join produces arn:aws:iam::<account>:root.
                            "AWS": [Join(':', ['arn:aws:iam:', ref_account, 'root'])]
                        },
                        "Action": "kms:*",
                        "Resource": "*"
                    }]
                }))

    # S3 bucket; name suffixed with a stack-id fragment for uniqueness.
    t.add_resource(
        s3.Bucket(
            'S3Bucket',
            DeletionPolicy='Retain',
            BucketName=Join(
                '-', ['openemr', Select('2', Split('/', ref_stack_id))])))

    # Standard CloudTrail delivery policy: ACL check plus PutObject under
    # /AWSLogs/<account>/ with bucket-owner-full-control required.
    t.add_resource(
        s3.BucketPolicy(
            'BucketPolicy',
            Bucket=Ref('S3Bucket'),
            PolicyDocument={
                "Version": "2012-10-17",
                "Statement": [{
                    "Sid": "AWSCloudTrailAclCheck",
                    "Effect": "Allow",
                    "Principal": {
                        "Service": "cloudtrail.amazonaws.com"
                    },
                    "Action": "s3:GetBucketAcl",
                    "Resource": {
                        "Fn::Join": ["", ["arn:aws:s3:::", {
                            "Ref": "S3Bucket"
                        }]]
                    }
                }, {
                    "Sid": "AWSCloudTrailWrite",
                    "Effect": "Allow",
                    "Principal": {
                        "Service": "cloudtrail.amazonaws.com"
                    },
                    "Action": "s3:PutObject",
                    "Resource": {
                        "Fn::Join": [
                            "",
                            [
                                "arn:aws:s3:::", {
                                    "Ref": "S3Bucket"
                                }, "/AWSLogs/", {
                                    "Ref": "AWS::AccountId"
                                }, "/*"
                            ]
                        ]
                    },
                    "Condition": {
                        "StringEquals": {
                            "s3:x-amz-acl": "bucket-owner-full-control"
                        }
                    }
                }]
            }))

    # CloudTrail trail; must wait for the bucket policy to exist.
    t.add_resource(
        cloudtrail.Trail('CloudTrail',
                         DependsOn='BucketPolicy',
                         IsLogging=True,
                         IncludeGlobalServiceEvents=True,
                         IsMultiRegionTrail=True,
                         S3BucketName=Ref('S3Bucket')))

    # NOTE(review): Ref('VPC') assumes a VPC resource created elsewhere in
    # the template before this runs -- verify caller ordering.
    t.add_resource(
        ec2.SecurityGroup('ApplicationSecurityGroup',
                          GroupDescription='Application Security Group',
                          VpcId=Ref('VPC'),
                          Tags=Tags(Name='Application')))
    return t
def generate(dry_run, file_location=None):
    """CloudFormation template generator to apply to all accounts which
    configures log sources to publish to the centralized log target(s)
    specified.

    :param dry_run: when truthy, print the template JSON instead of saving it
    :param file_location: optional output path; defaults to
        ``<log_aggregation_cf>/log_sources.json``
    """
    t = Template()
    t.add_version("2010-09-09")
    t.add_description(
        "UCSD Log Source AWS CloudFormation Template - this template is meant to be applied to pre-approved accounts and configures CloudWatch Logs to forward to the UCSD log aggregation process."
    )

    #
    # CloudWatch Logs setup - Set up shipping to 'centralized' account
    #

    # Parameters
    delivery_stream_arn = t.add_parameter(
        Parameter('LogDeliveryDestinationArn',
                  Type="String",
                  Default="",
                  Description="ARN of the Log Destination to send logs to."))

    # resources
    cwl_group_retention = t.add_parameter(
        Parameter("LogGroupRetentionInDays",
                  Type="Number",
                  Description=
                  "Number of days to retain logs in the CloudWatch Log Group",
                  MinValue=1,
                  MaxValue=14,
                  Default=1))

    # security_log_shipping_group_name is a module-level constant.
    cwl_group = t.add_resource(
        cwl.LogGroup('SecurityLogShippingGroup',
                     LogGroupName=security_log_shipping_group_name,
                     RetentionInDays=Ref(cwl_group_retention)))

    # Empty FilterPattern forwards every event in the group.
    cwl_subscription = t.add_resource(
        cwl.SubscriptionFilter('SecurityLogShippingFilter',
                               DestinationArn=Ref(delivery_stream_arn),
                               LogGroupName=Ref(cwl_group),
                               FilterPattern=""))

    cwl_primary_stream = t.add_resource(
        cwl.LogStream('PrimaryLogStream',
                      LogGroupName=Ref(cwl_group),
                      LogStreamName='PrimaryLogStream'))

    # Create IAM role to allow VPC Flow Logs within this account to push data to CloudWatch Logs per https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/flow-logs.html#flow-logs-iam
    vpc_flow_log_iam_role = t.add_resource(
        iam.Role('VPCFlowLogToCWLIAMRole',
                 AssumeRolePolicyDocument=Policy(Statement=[
                     Statement(Effect=Allow,
                               Action=[AssumeRole],
                               Principal=Principal(
                                   "Service", "vpc-flow-logs.amazonaws.com"))
                 ])))

    vpc_flow_log_policies = t.add_resource(
        iam.PolicyType(
            'VPCFlowLogToCWLPolicy',
            PolicyName='vpcflowlogtocwlpolicy20180213',
            Roles=[Ref(vpc_flow_log_iam_role)],
            PolicyDocument=Policy(Statement=[
                Statement(Effect=Allow,
                          Action=[
                              CreateLogGroup, CreateLogStream, PutLogEvents,
                              DescribeLogGroups, DescribeLogStreams
                          ],
                          Resource=["*"])
            ])))

    # outputs
    t.add_output(
        Output(
            'CloudWatchLogGroupName',
            Value=Ref(cwl_group),
            Description=
            "Name of the CloudWatch Log Group created to flow logs to the centralized logging stream."
        ))
    t.add_output(
        Output(
            'CloudWatchLogGroupARN',
            Value=GetAtt(cwl_group, "Arn"),
            Description=
            "ARN of the CloudWatch Log Group created to flow logs to the centralized logging stream."
        ))
    t.add_output(
        Output(
            'VPCFlowLogDeliveryLogsPermissionArn',
            Value=GetAtt(vpc_flow_log_iam_role, "Arn"),
            Description=
            "ARN of the IAM role for VPC Flow Logs to use within this account to ship VPC flow logs through."
        ))

    #
    # CloudTrail setup - ship to S3 in 'central account' as well as cloudtrail logs if it'll let us :)
    #

    # parameters
    ct_is_logging = t.add_parameter(
        Parameter(
            'CloudTrailIsLogging',
            Type="String",
            Default="false",
            AllowedValues=["true", "false"],
            Description=
            "Flag indicating that CloudTrail is configured to send logs."))

    ct_include_global = t.add_parameter(
        Parameter(
            'CloudTrailIncludeGlobal',
            Type="String",
            Default="true",
            AllowedValues=["true", "false"],
            Description=
            "Flag indicating that CloudTrail is configured to capture global service events."
        ))

    ct_multi_region = t.add_parameter(
        Parameter(
            'CloudTrailMultiRegion',
            Type="String",
            Default="true",
            AllowedValues=["true", "false"],
            Description=
            "Flag indicating that CloudTrail is to be configured in multi-region mode"
        ))

    ct_s3_key_prefix = t.add_parameter(
        Parameter('CloudTrailKeyPrefix',
                  Type='String',
                  Default='',
                  Description='Key name prefix for logs being sent to S3'))

    ct_bucket_name = t.add_parameter(
        Parameter(
            'CloudTrailBucketName',
            Type='String',
            Default='',
            Description='Name of the S3 Bucket for delivery of CloudTrail logs'
        ))

    # resources
    # Trail name includes the region so per-region deployments don't collide.
    ct_trail = t.add_resource(
        ct.Trail("SecurityTrail",
                 TrailName=Join("-", ["SecurityTrail", Region]),
                 S3BucketName=Ref(ct_bucket_name),
                 S3KeyPrefix=Ref(ct_s3_key_prefix),
                 IncludeGlobalServiceEvents=Ref(ct_include_global),
                 IsMultiRegionTrail=Ref(ct_multi_region),
                 IsLogging=Ref(ct_is_logging)))

    # outputs
    t.add_output(
        Output(
            'CloudTrailARN',
            Description=
            "ARN of the CloudTrail Trail configured for this log source deployment.",
            Value=GetAtt(ct_trail, "Arn")))

    # Splunk Addon User and Policies per http://docs.splunk.com/Documentation/AddOns/released/AWS/ConfigureAWSpermissions
    # NOTE(review): addon_user is created but never referenced below; both
    # Splunk policies attach to the VPC flow log ROLE instead of this user.
    # That looks like a copy-paste error (Users=[Ref(addon_user)] was likely
    # intended) -- confirm before relying on these permissions.
    addon_user = t.add_resource(
        iam.User('SplunkAddonUser', UserName='******'))

    # http://docs.splunk.com/Documentation/AddOns/released/AWS/ConfigureAWSpermissions#Configure_CloudTrail_permissions
    ct_splunk_user_policy = t.add_resource(
        iam.PolicyType('cloudtrailSplunkPolicy',
                       PolicyName='cloudtrailsplunkuser20180213',
                       Roles=[Ref(vpc_flow_log_iam_role)],
                       PolicyDocument=Policy(Statement=[
                           Statement(Effect=Allow,
                                     Action=[
                                         asqs.GetQueueAttributes,
                                         asqs.ListQueues, asqs.ReceiveMessage,
                                         asqs.GetQueueUrl, asqs.DeleteMessage,
                                         as3.Action('Get*'),
                                         as3.Action('List*'),
                                         as3.Action('Delete*')
                                     ],
                                     Resource=["*"])
                       ])))

    # http://docs.splunk.com/Documentation/AddOns/released/AWS/ConfigureAWSpermissions#Configure_CloudWatch_permissions
    cw_splunk_user_policy = t.add_resource(
        iam.PolicyType('cloudwatchSplunkPolicy',
                       PolicyName='cloudwatchsplunkuser20180213',
                       Roles=[Ref(vpc_flow_log_iam_role)],
                       PolicyDocument=Policy(Statement=[
                           Statement(Effect=Allow,
                                     Action=[
                                         aas.Action("Describe*"),
                                         acw.Action("Describe*"),
                                         acw.Action("Get*"),
                                         acw.Action("List*"),
                                         asns.Action("Get*"),
                                         asns.Action("List*")
                                     ],
                                     Resource=['*'])
                       ])))

    if dry_run:
        print(t.to_json())
    else:
        # log_aggregation_cf is a module-level output directory path.
        save_path = file_location if file_location else os.path.join(
            log_aggregation_cf, 'log_sources.json')
        with open(save_path, 'w') as f:
            f.write(t.to_json())