def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Demo stack: SSM parameter, IAM users/group, and a role with a managed policy.

    Provisions a sample SSM parameter, two console users (one backed by a
    Secrets Manager secret, one with an inline password), a group that gets
    S3 read-only plus SSM read access, and a role carrying an EC2/CloudWatch
    describe policy. Finally emits the account console sign-in URL.
    """
    super().__init__(scope, construct_id, **kwargs)

    # Sample SSM parameter used to demonstrate grant_read() below.
    demo_param = aws_ssm.StringParameter(
        self,
        "ssm_01",
        parameter_name="/test/demo/iam",
        string_value="123",
        description="test parameter",
        tier=aws_ssm.ParameterTier.STANDARD,
    )

    # Secrets Manager generates user01's console password.
    user01_secret = aws_secrm.Secret(
        self,
        "user01password",
        secret_name="user01_password",
        description="User01 Password",
    )

    # user01's password comes from the generated secret;
    # user02 uses a fixed plaintext value (demo only).
    iam_user01 = aws_iam.User(
        self,
        "user01",
        user_name="user01",
        password=user01_secret.secret_value,
    )
    iam_user02 = aws_iam.User(
        self,
        "user02",
        user_name="user02",
        password=core.SecretValue.plain_text("Testpassowrd123"),
    )

    # Group membership: only user02 joins the demo group.
    demo_group = aws_iam.Group(self, "test_group", group_name="test-group")
    demo_group.add_user(iam_user02)

    # AWS-managed S3 read-only access for the whole group.
    demo_group.add_managed_policy(
        aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            "AmazonS3ReadOnlyAccess"))

    # Read access to the demo SSM parameter.
    demo_param.grant_read(demo_group)

    # Let group members list all parameters in the console
    # (DescribeParameters cannot be scoped to a single parameter).
    describe_params_stmt = aws_iam.PolicyStatement(
        effect=aws_iam.Effect.ALLOW,
        actions=["ssm:DescribeParameters"],
        resources=["*"],
    )
    describe_params_stmt.sid = "ListAllParametersForTestGroup"
    demo_group.add_to_policy(describe_params_stmt)

    # Role assumable by any principal in this account.
    demo_role = aws_iam.Role(
        self,
        "test_role",
        role_name="cdk_test_role",
        assumed_by=aws_iam.AccountPrincipal(f"{core.Aws.ACCOUNT_ID}"),
    )

    # Customer-managed policy (EC2/CloudWatch describe) attached to the role.
    ec2_list_policy = aws_iam.ManagedPolicy(
        self,
        "managed_policy_01_list_ec2",
        managed_policy_name="managed_policy_01_list_ec2",
        description="list ec2",
        statements=[
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                actions=[
                    "ec2:Describe*",
                    "cloudwatch:Describe*",
                    "cloudwatch:Get*",
                ],
                resources=["*"],
            )
        ],
        roles=[demo_role],
    )

    # Console sign-in URL for this account.
    login_url_out = core.CfnOutput(
        self,
        "login_output01",
        description="Login URL for user02",
        value=f"https://{core.Aws.ACCOUNT_ID}.signin.aws.amazon.com/console",
    )
DEPLOYMENT_REGION = stage['deployment-region'] TRUSTED_ACCOUNTS = stage['trusted-accounts'] stack = core.Stack(app, 'CodePipelineSharedResourcesStack', env=core.Environment(account=DEPLOYMENT_ACCOUNT, region=DEPLOYMENT_REGION)) artifacts_key = kms.Key(stack, 'ArtifactsKey', removal_policy=core.RemovalPolicy.DESTROY) artifacts_key.add_to_resource_policy( iam.PolicyStatement( effect=iam.Effect.ALLOW, principals=[ iam.AccountPrincipal(account) for account in TRUSTED_ACCOUNTS + [DEPLOYMENT_ACCOUNT] ], actions=[ 'kms:Decrypt', 'kms:DescribeKey', 'kms:Encrypt', 'kms:ReEncrypt*', 'kms:GenerateDataKey*' ], resources=['*'])) artifacts_bucket = s3.Bucket(stack, 'ArtifactsBucket', encryption=s3.BucketEncryption.KMS, encryption_key=artifacts_key, removal_policy=core.RemovalPolicy.DESTROY) artifacts_bucket.add_to_resource_policy( iam.PolicyStatement(effect=iam.Effect.ALLOW,
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """ABAC demo stack: tag-matched role assumption and EC2 management.

    Creates a user ("redRosy") tagged teamUnicorn/projectRed via a custom
    resource, a group whose members may assume only roles whose tags match
    their own principal tags, a role restricted to tag-scoped EC2 actions,
    and two test instances (projectRed / projectBlue) to verify the
    tag-based restrictions.
    """
    super().__init__(scope, id, **kwargs)

    # Lets generate a password for our user (custom resource).
    shiny_new_pass = random_string_generator(
        self, "shinyNewPasswordGenerator", Length=20)

    # Lets create a user
    projectRedUser1redRosy = iam.User(
        self,
        "projectRedUser1redRosy",
        user_name="redRosy",
        password=core.SecretValue.plain_text(shiny_new_pass.response))
    teamUnicornGrp = iam.Group(
        self, "teamUnicornGrp", group_name="teamUnicorn")

    # Add Users To Group
    teamUnicornGrp.add_user(projectRedUser1redRosy)
    # blueGrp1.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("AmazonS3ReadOnlyAccess"))

    ##############################################
    # We need a custom resource to TAG IAM Users #
    ##############################################
    iamUserTaggerResp = iam_user_tagger(
        self,
        "iamTagger",
        message=[{
            "user": projectRedUser1redRosy.user_name,
            "tags": [{
                'Key': 'teamName',
                'Value': 'teamUnicorn'
            }, {
                'Key': 'projectName',
                'Value': 'projectRed'
            }]
        }])

    # Lets Create the IAM Role.
    # Users belonging to this group will be able to assume this role
    # based on tag validation (see grpStmt1 below).
    accountId = core.Aws.ACCOUNT_ID
    teamUnicornProjectRedRole = iam.Role(
        self,
        'unicornTeamProjectRedRoleId',
        assumed_by=iam.AccountPrincipal(f"{accountId}"),
        role_name="teamUnicornProjectRedRole")
    core.Tag.add(teamUnicornProjectRedRole, key="teamName",
                 value="teamUnicorn")
    core.Tag.add(teamUnicornProjectRedRole, key="projectName",
                 value="projectRed")

    # Allow Group to Assume Role.
    # The role will have naming convention like <TEAM-NAME><PROJECT-NAME>ROLE,
    # for example: unicornTeamProjectRedRole. Assumption succeeds only when
    # the principal's team/project tags equal the role's resource tags.
    grpStmt1 = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=[f"arn:aws:iam::{accountId}:role/teamUnicornProject*"],
        actions=["sts:AssumeRole"],
        conditions={
            "StringEquals": {
                "iam:ResourceTag/teamName": "${aws:PrincipalTag/teamName}",
                "iam:ResourceTag/projectName":
                    "${aws:PrincipalTag/projectName}"
            }
        })
    grpStmt1.sid = "AllowGroupMembersToAssumeRoleMatchingTeamName"
    # Attach the policy to the group
    teamUnicornGrp.add_to_policy(grpStmt1)

    # Add Permissions to the Role
    roleStmt0 = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=["*"],
        actions=[
            "ec2:Describe*",
            "cloudwatch:Describe*",
            "cloudwatch:Get*",
        ])
    roleStmt0.sid = "AllowUserToDescribeInstances"
    teamUnicornProjectRedRole.add_to_policy(roleStmt0)

    # RunInstances on supporting resources (no tag restriction possible here).
    roleStmt1a = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=[
            "arn:aws:ec2:*::image/*", "arn:aws:ec2:*::snapshot/*",
            "arn:aws:ec2:*:*:subnet/*",
            "arn:aws:ec2:*:*:network-interface/*",
            "arn:aws:ec2:*:*:security-group/*", "arn:aws:ec2:*:*:key-pair/*"
        ],
        actions=["ec2:RunInstances"])
    roleStmt1a.sid = "AllowRunInstances"
    teamUnicornProjectRedRole.add_to_policy(roleStmt1a)

    # Instances/volumes may only be launched when the request carries
    # team/project tags matching the caller's principal tags.
    roleStmt1b = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=[
            "arn:aws:ec2:*:*:instance/*",
            "arn:aws:ec2:*:*:volume/*",
        ],
        actions=["ec2:CreateVolume", "ec2:RunInstances"],
        conditions={
            "StringEquals": {
                "aws:RequestTag/teamName": "${aws:PrincipalTag/teamName}",
                "aws:RequestTag/projectName":
                    "${aws:PrincipalTag/projectName}"
            },
            "ForAllValues:StringEquals": {
                "aws:TagKeys": ["teamName", "projectName"]
            }
        })
    roleStmt1b.sid = "AllowRunInstancesWithRestrictionsRequiredTags"
    teamUnicornProjectRedRole.add_to_policy(roleStmt1b)

    # BUGFIX: the original conditions dict declared "StringEquals" twice;
    # Python keeps only the last duplicate key, so the aws:RequestTag/*
    # matching conditions were silently dropped. A single condition
    # operator with multiple keys is evaluated as AND in IAM, so all
    # StringEquals keys are merged into one map here.
    roleStmt2 = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=[
            "arn:aws:ec2:*:*:instance/*", "arn:aws:ec2:*:*:volume/*"
        ],
        actions=["ec2:CreateTags"],
        conditions={
            "StringEquals": {
                "aws:RequestTag/teamName": "${aws:PrincipalTag/teamName}",
                "aws:RequestTag/projectName":
                    "${aws:PrincipalTag/projectName}",
                "ec2:CreateAction": "RunInstances"
            },
            "ForAllValues:StringEquals": {
                "aws:TagKeys": ["projectName", "teamName"]
            }
        })
    roleStmt2.sid = "AllowCreateTagsIfRequestingValidTags"
    teamUnicornProjectRedRole.add_to_policy(roleStmt2)

    # Lifecycle actions only on resources whose tags match the principal's.
    roleStmt3 = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=[
            "arn:aws:ec2:*:*:instance/*", "arn:aws:ec2:*:*:volume/*"
        ],
        actions=[
            "ec2:RebootInstances", "ec2:TerminateInstances",
            "ec2:StartInstances", "ec2:StopInstances"
        ],
        conditions={
            "StringEquals": {
                "ec2:ResourceTag/teamName": "${aws:PrincipalTag/teamName}",
                "ec2:ResourceTag/projectName":
                    "${aws:PrincipalTag/projectName}"
            }
        })
    roleStmt3.sid = "AllowInstanceManagementIfTagsMatch"
    teamUnicornProjectRedRole.add_to_policy(roleStmt3)

    # Lets create couple of instances to test
    vpc = ec2.Vpc(
        self,
        "abacVPC",
        cidr="10.13.0.0/21",
        max_azs=2,
        nat_gateways=0,
        subnet_configuration=[
            ec2.SubnetConfiguration(name="pubSubnet",
                                    cidr_mask=24,
                                    subnet_type=ec2.SubnetType.PUBLIC)
        ])
    # Tag all VPC Resources
    core.Tag.add(vpc, key="Owner", value="KonStone",
                 include_resource_types=[])
    core.Tag.add(vpc, key="teamName", value="teamUnicorn",
                 include_resource_types=[])

    # We are using the latest AMAZON LINUX AMI
    ami_id = ec2.AmazonLinuxImage(
        generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2).get_image(
            self).image_id

    red_web_inst = ec2.CfnInstance(
        self,
        "redWebInstance01",
        image_id=ami_id,
        instance_type="t2.micro",
        monitoring=False,
        tags=[{
            "key": "teamName",
            "value": "teamUnicorn"
        }, {
            "key": "projectName",
            "value": "projectRed"
        }, {
            "key": "Name",
            "value": "projectRed-Web"
        }],
        network_interfaces=[{
            "deviceIndex": "0",
            "associatePublicIpAddress": True,
            "subnetId": vpc.public_subnets[0].subnet_id,
            # "groupSet": [web_sg.security_group_id]
        }],
        # https://github.com/aws/aws-cdk/issues/3419
    )
    # core.Tag.add(red_web_inst,key="Owner",value="KonStone",include_resource_types=[])

    blue_web_inst = ec2.CfnInstance(
        self,
        "blueWebInstance01",
        image_id=ami_id,
        instance_type="t2.micro",
        monitoring=False,
        tags=[{
            "key": "teamName",
            "value": "teamUnicorn"
        }, {
            "key": "projectName",
            "value": "projectBlue"
        }, {
            "key": "Name",
            "value": "projectBlue-Web"
        }],
        network_interfaces=[{
            "deviceIndex": "0",
            "associatePublicIpAddress": True,
            "subnetId": vpc.public_subnets[0].subnet_id,
            # "groupSet": [web_sg.security_group_id]
        }],
        # https://github.com/aws/aws-cdk/issues/3419
    )
    # core.Tag.add(blue_web_inst,key="Owner",value="KonStone",include_resource_types=[])

    # https://signin.aws.amazon.com/switchrole?roleName=teamUnicornProjectRedRole&account=lint3r
    role_login_url = (
        f"https://signin.aws.amazon.com/switchrole?&account={accountId}"
        f"&roleName={teamUnicornProjectRedRole.role_name}")
    output1 = core.CfnOutput(self,
                             "Red-Rosy-AssumeRoleUrl",
                             value=role_login_url,
                             description="Url to login & assume role")
    output2 = core.CfnOutput(self,
                             "redRosy_user_password",
                             value=shiny_new_pass.response,
                             description="redRosy user password")
    # Publish the custom resource output
    output3 = core.CfnOutput(
        self,
        "IAMUserTaggerResponseMessage",
        description="IAM User Tagging Successful",
        value=iamUserTaggerResp.response,
    )
    # Publish WebInstances ID and Tags
    output4 = core.CfnOutput(
        self,
        "ProjectRed-Web-Instance",
        description="Project Red Web Instance Publice IP",
        value=core.Fn.get_att(logical_name_of_resource="redWebInstance01",
                              attribute_name="PublicIp").to_string(),
    )
    output5 = core.CfnOutput(
        self,
        "ProjectBlue-Web-Instance",
        description="Project Blue Web Instance Publice IP",
        value=core.Fn.get_att(logical_name_of_resource="blueWebInstance01",
                              attribute_name="PublicIp").to_string(),
    )
    output10 = core.CfnOutput(
        self,
        "Red-Rosy-User-Login-Url",
        value=(
            f"https://{core.Aws.ACCOUNT_ID}.signin.aws.amazon.com/console"
        ),
        description=f"The URL for Rosy to Login")
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) ###################################################################### # ELB mapping ###################################################################### elb_id_temp = region_info.FactName.ELBV2_ACCOUNT elb_map_temp = region_info.RegionInfo.region_map(elb_id_temp) elb_mapping = {} for key in elb_map_temp: elb_mapping[key] = {'accountid': elb_map_temp[key]} elb_accounts = core.CfnMapping(scope=self, id='ELBv2AccountMap', mapping=elb_mapping) ###################################################################### # get params ###################################################################### allow_source_address = core.CfnParameter( self, 'AllowedSourceIpAddresses', allowed_pattern=r'^[0-9./\s]*', description='Space-delimited list of CIDR blocks', default='10.0.0.0/8 172.16.0.0/12 192.168.0.0/16') sns_email = core.CfnParameter( self, 'SnsEmail', allowed_pattern=r'^[0-9a-zA-Z@_\-\+\.]*', description=('Input your email as SNS topic, where Amazon ES will ' 'send alerts to'), default='*****@*****.**') geoip_license_key = core.CfnParameter( self, 'GeoLite2LicenseKey', allowed_pattern=r'^[0-9a-zA-Z]{16}$', default='xxxxxxxxxxxxxxxx', description=("If you wolud like to enrich geoip locaiton such as " "IP address's country, get a license key form MaxMind" " and input the key. 
If you not, keep " "xxxxxxxxxxxxxxxx")) aes_domain_name = self.node.try_get_context('aes_domain_name') bucket = f'{aes_domain_name}-{core.Aws.ACCOUNT_ID}' s3bucket_name_geo = f'{bucket}-geo' s3bucket_name_log = f'{bucket}-log' s3bucket_name_snapshot = f'{bucket}-snapshot' # organizations / multiaccount org_id = self.node.try_get_context('organizations').get('org_id') org_mgmt_id = self.node.try_get_context('organizations').get( 'management_id') org_member_ids = self.node.try_get_context('organizations').get( 'member_ids') no_org_ids = self.node.try_get_context('no_organizations').get( 'aws_accounts') temp_geo = self.node.try_get_context('s3_bucket_name').get('geo') if temp_geo: s3bucket_name_geo = temp_geo temp_log = self.node.try_get_context('s3_bucket_name').get('log') if temp_log: s3bucket_name_log = temp_log elif org_id or no_org_ids: s3bucket_name_log = f'{aes_domain_name}-{self.account}-log' temp_snap = self.node.try_get_context('s3_bucket_name').get('snapshot') if temp_snap: s3bucket_name_snapshot = temp_snap kms_cmk_alias = self.node.try_get_context('kms_cmk_alias') if not kms_cmk_alias: kms_cmk_alias = 'aes-siem-key' ###################################################################### # deploy VPC when context is defined as using VPC ###################################################################### # vpc_type is 'new' or 'import' or None vpc_type = self.node.try_get_context('vpc_type') if vpc_type == 'new': is_vpc = True vpc_cidr = self.node.try_get_context('new_vpc_nw_cidr_block') subnet_cidr_mask = int( self.node.try_get_context('new_vpc_subnet_cidr_mask')) is_vpc = True # VPC vpc_aes_siem = aws_ec2.Vpc( self, 'VpcAesSiem', cidr=vpc_cidr, max_azs=3, nat_gateways=0, subnet_configuration=[ aws_ec2.SubnetConfiguration( subnet_type=aws_ec2.SubnetType.ISOLATED, name='aes-siem-subnet', cidr_mask=subnet_cidr_mask) ]) subnet1 = vpc_aes_siem.isolated_subnets[0] subnets = [{'subnet_type': aws_ec2.SubnetType.ISOLATED}] vpc_subnets = aws_ec2.SubnetSelection( 
subnet_type=aws_ec2.SubnetType.ISOLATED) vpc_aes_siem_opt = vpc_aes_siem.node.default_child.cfn_options vpc_aes_siem_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN for subnet in vpc_aes_siem.isolated_subnets: subnet_opt = subnet.node.default_child.cfn_options subnet_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN elif vpc_type == 'import': vpc_id = self.node.try_get_context('imported_vpc_id') _sbunet1 = self.node.try_get_context('imported_vpc_subnet1') _sbunet2 = self.node.try_get_context('imported_vpc_subnet2') _sbunet3 = self.node.try_get_context('imported_vpc_subnet3') vpc_aes_siem = aws_ec2.Vpc.from_lookup(self, 'VpcAesSiem', vpc_id=vpc_id) subnet1 = aws_ec2.Subnet.from_subnet_attributes( self, 'Subenet1', **_sbunet1) subnet2 = aws_ec2.Subnet.from_subnet_attributes( self, 'Subenet2', **_sbunet2) subnet3 = aws_ec2.Subnet.from_subnet_attributes( self, 'Subenet3', **_sbunet3) subnets = [subnet1, subnet2, subnet3] vpc_subnets = aws_ec2.SubnetSelection(subnets=subnets) if vpc_type: is_vpc = True # Security Group sg_vpc_noinbound_aes_siem = aws_ec2.SecurityGroup( self, 'AesSiemVpcNoinboundSecurityGroup', security_group_name='aes-siem-noinbound-vpc-sg', vpc=vpc_aes_siem) sg_vpc_aes_siem = aws_ec2.SecurityGroup( self, 'AesSiemVpcSecurityGroup', security_group_name='aes-siem-vpc-sg', vpc=vpc_aes_siem) sg_vpc_aes_siem.add_ingress_rule( peer=aws_ec2.Peer.ipv4(vpc_aes_siem.vpc_cidr_block), connection=aws_ec2.Port.tcp(443), ) sg_vpc_opt = sg_vpc_aes_siem.node.default_child.cfn_options sg_vpc_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN # VPC Endpoint vpc_aes_siem.add_gateway_endpoint( 'S3Endpoint', service=aws_ec2.GatewayVpcEndpointAwsService.S3, subnets=subnets) vpc_aes_siem.add_interface_endpoint( 'SQSEndpoint', security_groups=[sg_vpc_aes_siem], service=aws_ec2.InterfaceVpcEndpointAwsService.SQS, ) vpc_aes_siem.add_interface_endpoint( 'KMSEndpoint', security_groups=[sg_vpc_aes_siem], service=aws_ec2.InterfaceVpcEndpointAwsService.KMS, ) 
vpc_aes_siem.add_interface_endpoint( 'SNSEndpoint', security_groups=[sg_vpc_aes_siem], service=aws_ec2.InterfaceVpcEndpointAwsService.SNS, ) else: is_vpc = False is_vpc = core.CfnCondition(self, 'IsVpc', expression=core.Fn.condition_equals( is_vpc, True)) """ CloudFormation実行時の条件式の書き方 ClassのBasesが aws_cdk.core.Resource の時は、 node.default_child.cfn_options.condition = is_vpc ClassのBasesが aws_cdk.core.CfnResource の時は、 cfn_options.condition = is_vpc """ ###################################################################### # create cmk of KMS to encrypt S3 bucket ###################################################################### kms_aes_siem = aws_kms.Key(self, 'KmsAesSiemLog', description='CMK for SIEM solution', removal_policy=core.RemovalPolicy.RETAIN) aws_kms.Alias(self, 'KmsAesSiemLogAlias', alias_name=kms_cmk_alias, target_key=kms_aes_siem, removal_policy=core.RemovalPolicy.RETAIN) kms_aes_siem.add_to_resource_policy( aws_iam.PolicyStatement( sid='Allow GuardDuty to use the key', actions=['kms:GenerateDataKey'], principals=[ aws_iam.ServicePrincipal('guardduty.amazonaws.com') ], resources=['*'], ), ) kms_aes_siem.add_to_resource_policy( aws_iam.PolicyStatement( sid='Allow VPC Flow Logs to use the key', actions=[ 'kms:Encrypt', 'kms:Decrypt', 'kms:ReEncrypt*', 'kms:GenerateDataKey*', 'kms:DescribeKey' ], principals=[ aws_iam.ServicePrincipal('delivery.logs.amazonaws.com') ], resources=['*'], ), ) # basic policy key_policy_basic1 = aws_iam.PolicyStatement( sid='Allow principals in the account to decrypt log files', actions=['kms:DescribeKey', 'kms:ReEncryptFrom'], principals=[ aws_iam.AccountPrincipal(account_id=core.Aws.ACCOUNT_ID) ], resources=['*'], ) kms_aes_siem.add_to_resource_policy(key_policy_basic1) # for CloudTrail key_policy_trail1 = aws_iam.PolicyStatement( sid='Allow CloudTrail to describe key', actions=['kms:DescribeKey'], principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')], resources=['*'], ) 
kms_aes_siem.add_to_resource_policy(key_policy_trail1) key_policy_trail2 = aws_iam.PolicyStatement( sid=('Allow CloudTrail to encrypt logs'), actions=['kms:GenerateDataKey*'], principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')], resources=['*'], conditions={ 'StringLike': { 'kms:EncryptionContext:aws:cloudtrail:arn': [f'arn:aws:cloudtrail:*:{core.Aws.ACCOUNT_ID}:trail/*'] } }) kms_aes_siem.add_to_resource_policy(key_policy_trail2) ###################################################################### # create s3 bucket ###################################################################### block_pub = aws_s3.BlockPublicAccess(block_public_acls=True, ignore_public_acls=True, block_public_policy=True, restrict_public_buckets=True) s3_geo = aws_s3.Bucket( self, 'S3BucketForGeoip', block_public_access=block_pub, bucket_name=s3bucket_name_geo, # removal_policy=core.RemovalPolicy.DESTROY, ) # create s3 bucket for log collector s3_log = aws_s3.Bucket( self, 'S3BucketForLog', block_public_access=block_pub, bucket_name=s3bucket_name_log, versioned=True, encryption=aws_s3.BucketEncryption.S3_MANAGED, # removal_policy=core.RemovalPolicy.DESTROY, ) # create s3 bucket for aes snapshot s3_snapshot = aws_s3.Bucket( self, 'S3BucketForSnapshot', block_public_access=block_pub, bucket_name=s3bucket_name_snapshot, # removal_policy=core.RemovalPolicy.DESTROY, ) ###################################################################### # IAM Role ###################################################################### # snaphot rule for AES policydoc_snapshot = aws_iam.PolicyDocument(statements=[ aws_iam.PolicyStatement(actions=['s3:ListBucket'], resources=[s3_snapshot.bucket_arn]), aws_iam.PolicyStatement( actions=['s3:GetObject', 's3:PutObject', 's3:DeleteObject'], resources=[s3_snapshot.bucket_arn + '/*']) ]) aes_siem_snapshot_role = aws_iam.Role( self, 'AesSiemSnapshotRole', role_name='aes-siem-snapshot-role', inline_policies=[ policydoc_snapshot, ], 
assumed_by=aws_iam.ServicePrincipal('es.amazonaws.com')) policydoc_assume_snapshrole = aws_iam.PolicyDocument(statements=[ aws_iam.PolicyStatement( actions=['iam:PassRole'], resources=[aes_siem_snapshot_role.role_arn]), ]) aes_siem_deploy_role_for_lambda = aws_iam.Role( self, 'AesSiemDeployRoleForLambda', role_name='aes-siem-deploy-role-for-lambda', managed_policies=[ aws_iam.ManagedPolicy.from_aws_managed_policy_name( 'AmazonESFullAccess'), aws_iam.ManagedPolicy.from_aws_managed_policy_name( 'service-role/AWSLambdaBasicExecutionRole'), ], inline_policies=[policydoc_assume_snapshrole, policydoc_snapshot], assumed_by=aws_iam.ServicePrincipal('lambda.amazonaws.com')) if vpc_type: aes_siem_deploy_role_for_lambda.add_managed_policy( aws_iam.ManagedPolicy.from_aws_managed_policy_name( 'service-role/AWSLambdaVPCAccessExecutionRole')) # for alert from Amazon ES aes_siem_sns_role = aws_iam.Role( self, 'AesSiemSnsRole', role_name='aes-siem-sns-role', assumed_by=aws_iam.ServicePrincipal('es.amazonaws.com')) ###################################################################### # in VPC ###################################################################### if vpc_type: slr_aes = aws_iam.CfnServiceLinkedRole( self, 'AWSServiceRoleForAmazonElasticsearchService', aws_service_name='es.amazonaws.com', description='Created by cloudformation of aes-siem stack') slr_aes.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN ###################################################################### # SQS for es-laoder's DLQ ###################################################################### sqs_aes_siem_dlq = aws_sqs.Queue( self, 'AesSiemDlq', queue_name='aes-siem-dlq', retention_period=core.Duration.days(14)) ###################################################################### # Setup Lambda ###################################################################### # setup lambda of es_loader lambda_es_loader_vpc_kwargs = {} if vpc_type: lambda_es_loader_vpc_kwargs = { 
'security_group': sg_vpc_noinbound_aes_siem, 'vpc': vpc_aes_siem, 'vpc_subnets': vpc_subnets, } lambda_es_loader = aws_lambda.Function( self, 'LambdaEsLoader', **lambda_es_loader_vpc_kwargs, function_name='aes-siem-es-loader', runtime=aws_lambda.Runtime.PYTHON_3_8, # code=aws_lambda.Code.asset('../lambda/es_loader.zip'), code=aws_lambda.Code.asset('../lambda/es_loader'), handler='index.lambda_handler', memory_size=512, timeout=core.Duration.seconds(600), dead_letter_queue_enabled=True, dead_letter_queue=sqs_aes_siem_dlq, environment={'GEOIP_BUCKET': s3bucket_name_geo}) es_loader_newver = lambda_es_loader.add_version( name=__version__, description=__version__) es_loader_opt = es_loader_newver.node.default_child.cfn_options es_loader_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN # send only # sqs_aes_siem_dlq.grant(lambda_es_loader, 'sqs:SendMessage') # send and reieve. but it must be loop sqs_aes_siem_dlq.grant(lambda_es_loader, 'sqs:SendMessage', 'sqs:ReceiveMessage', 'sqs:DeleteMessage', 'sqs:GetQueueAttributes') lambda_geo = aws_lambda.Function( self, 'LambdaGeoipDownloader', function_name='aes-siem-geoip-downloader', runtime=aws_lambda.Runtime.PYTHON_3_8, code=aws_lambda.Code.asset('../lambda/geoip_downloader'), handler='index.lambda_handler', memory_size=320, timeout=core.Duration.seconds(300), environment={ 's3bucket_name': s3bucket_name_geo, 'license_key': geoip_license_key.value_as_string, }) lambda_geo_newver = lambda_geo.add_version(name=__version__, description=__version__) lamba_geo_opt = lambda_geo_newver.node.default_child.cfn_options lamba_geo_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN ###################################################################### # setup elasticsearch ###################################################################### lambda_deploy_es = aws_lambda.Function( self, 'LambdaDeployAES', function_name='aes-siem-deploy-aes', runtime=aws_lambda.Runtime.PYTHON_3_8, # 
code=aws_lambda.Code.asset('../lambda/deploy_es.zip'), code=aws_lambda.Code.asset('../lambda/deploy_es'), handler='index.aes_domain_handler', memory_size=128, timeout=core.Duration.seconds(720), environment={ 'accountid': core.Aws.ACCOUNT_ID, 'aes_domain_name': aes_domain_name, 'aes_admin_role': aes_siem_deploy_role_for_lambda.role_arn, 'es_loader_role': lambda_es_loader.role.role_arn, 'allow_source_address': allow_source_address.value_as_string, }, role=aes_siem_deploy_role_for_lambda, ) if vpc_type: lambda_deploy_es.add_environment('vpc_subnet_id', subnet1.subnet_id) lambda_deploy_es.add_environment('security_group_id', sg_vpc_aes_siem.security_group_id) else: lambda_deploy_es.add_environment('vpc_subnet_id', 'None') lambda_deploy_es.add_environment('security_group_id', 'None') deploy_es_newver = lambda_deploy_es.add_version( name=__version__, description=__version__) deploy_es_opt = deploy_es_newver.node.default_child.cfn_options deploy_es_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN # execute lambda_deploy_es to deploy Amaozon ES Domain aes_domain = aws_cloudformation.CfnCustomResource( self, 'AesSiemDomainDeployedR2', service_token=lambda_deploy_es.function_arn, ) aes_domain.add_override('Properties.ConfigVersion', __version__) aes_domain.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN es_endpoint = aes_domain.get_att('es_endpoint').to_string() lambda_es_loader.add_environment('ES_ENDPOINT', es_endpoint) lambda_configure_es_vpc_kwargs = {} if vpc_type: lambda_configure_es_vpc_kwargs = { 'security_group': sg_vpc_noinbound_aes_siem, 'vpc': vpc_aes_siem, 'vpc_subnets': aws_ec2.SubnetSelection(subnets=[ subnet1, ]), } lambda_configure_es = aws_lambda.Function( self, 'LambdaConfigureAES', **lambda_configure_es_vpc_kwargs, function_name='aes-siem-configure-aes', runtime=aws_lambda.Runtime.PYTHON_3_8, code=aws_lambda.Code.asset('../lambda/deploy_es'), handler='index.aes_config_handler', memory_size=128, timeout=core.Duration.seconds(300), 
environment={ 'accountid': core.Aws.ACCOUNT_ID, 'aes_domain_name': aes_domain_name, 'aes_admin_role': aes_siem_deploy_role_for_lambda.role_arn, 'es_loader_role': lambda_es_loader.role.role_arn, 'allow_source_address': allow_source_address.value_as_string, 'es_endpoint': es_endpoint, }, role=aes_siem_deploy_role_for_lambda, ) if vpc_type: lambda_configure_es.add_environment('vpc_subnet_id', subnet1.subnet_id) lambda_configure_es.add_environment( 'security_group_id', sg_vpc_aes_siem.security_group_id) else: lambda_configure_es.add_environment('vpc_subnet_id', 'None') lambda_configure_es.add_environment('security_group_id', 'None') configure_es_newver = lambda_configure_es.add_version( name=__version__, description=__version__) configure_es_opt = configure_es_newver.node.default_child.cfn_options configure_es_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN aes_config = aws_cloudformation.CfnCustomResource( self, 'AesSiemDomainConfiguredR2', service_token=lambda_configure_es.function_arn, ) aes_config.add_override('Properties.ConfigVersion', __version__) aes_config.add_depends_on(aes_domain) aes_config.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN es_arn = (f'arn:aws:es:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}' f':domain/{aes_domain_name}') # grant permission to es_loader role lambda_es_loader.role.attach_inline_policy( aws_iam.Policy(self, 'aes-siem-policy-to-load-entries-to-es', policy_name='aes-siem-policy-to-load-entries-to-es', statements=[ aws_iam.PolicyStatement(actions=['es:*'], resources=[ es_arn + '/*', ]), ])) # grant additional permission to es_loader role additional_kms_cmks = self.node.try_get_context('additional_kms_cmks') if additional_kms_cmks: lambda_es_loader.role.attach_inline_policy( aws_iam.Policy(self, 'access_to_additional_cmks', policy_name='access_to_additional_cmks', statements=[ aws_iam.PolicyStatement( actions=['kms:Decrypt'], resources=sorted( set(additional_kms_cmks))) ])) additional_buckets = 
self.node.try_get_context('additional_s3_buckets') if additional_buckets: buckets_list = [] for bucket in additional_buckets: buckets_list.append(f'arn:aws:s3:::{bucket}') buckets_list.append(f'arn:aws:s3:::{bucket}/*') lambda_es_loader.role.attach_inline_policy( aws_iam.Policy(self, 'access_to_additional_buckets', policy_name='access_to_additional_buckets', statements=[ aws_iam.PolicyStatement( actions=[ 's3:GetObject*', 's3:GetBucket*', 's3:List*' ], resources=sorted(set(buckets_list))) ])) kms_aes_siem.grant_decrypt(lambda_es_loader) ###################################################################### # s3 notification and grant permisssion ###################################################################### s3_geo.grant_read_write(lambda_geo) s3_geo.grant_read(lambda_es_loader) s3_log.grant_read(lambda_es_loader) # create s3 notification for es_loader notification = aws_s3_notifications.LambdaDestination(lambda_es_loader) # assign notification for the s3 PUT event type # most log system use PUT, but also CLB use POST & Multipart Upload s3_log.add_event_notification( aws_s3.EventType.OBJECT_CREATED, notification, aws_s3.NotificationKeyFilter(prefix='AWSLogs/')) # For user logs, not AWS logs s3_log.add_event_notification( aws_s3.EventType.OBJECT_CREATED, notification, aws_s3.NotificationKeyFilter(prefix='UserLogs/')) # Download geoip to S3 once by executing lambda_geo get_geodb = aws_cloudformation.CfnCustomResource( self, 'ExecLambdaGeoipDownloader', service_token=lambda_geo.function_arn, ) get_geodb.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN # Download geoip every day at 6PM UTC rule = aws_events.Rule( self, 'CwlRuleLambdaGeoipDownloaderDilly', schedule=aws_events.Schedule.cron(minute='20', hour='0', month='*', week_day='*', year='*'), ) rule.add_target(aws_events_targets.LambdaFunction(lambda_geo)) ###################################################################### # bucket policy 
###################################################################### s3_awspath = s3_log.bucket_arn + '/AWSLogs/' + core.Aws.ACCOUNT_ID bucket_policy_common1 = aws_iam.PolicyStatement( sid='ELB Policy', principals=[ aws_iam.AccountPrincipal(account_id=elb_accounts.find_in_map( core.Aws.REGION, 'accountid')) ], actions=['s3:PutObject'], resources=[s3_awspath + '/*'], ) # NLB / ALB / R53resolver / VPC Flow Logs bucket_policy_elb1 = aws_iam.PolicyStatement( sid='AWSLogDeliveryAclCheck For ALB NLB R53Resolver Flowlogs', principals=[ aws_iam.ServicePrincipal('delivery.logs.amazonaws.com') ], actions=['s3:GetBucketAcl', 's3:ListBucket'], resources=[s3_log.bucket_arn], ) bucket_policy_elb2 = aws_iam.PolicyStatement( sid='AWSLogDeliveryWrite For ALB NLB R53Resolver Flowlogs', principals=[ aws_iam.ServicePrincipal('delivery.logs.amazonaws.com') ], actions=['s3:PutObject'], resources=[s3_awspath + '/*'], conditions={ 'StringEquals': { 's3:x-amz-acl': 'bucket-owner-full-control' } }) s3_log.add_to_resource_policy(bucket_policy_common1) s3_log.add_to_resource_policy(bucket_policy_elb1) s3_log.add_to_resource_policy(bucket_policy_elb2) # CloudTrail bucket_policy_trail1 = aws_iam.PolicyStatement( sid='AWSLogDeliveryAclCheck For Cloudtrail', principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')], actions=['s3:GetBucketAcl'], resources=[s3_log.bucket_arn], ) bucket_policy_trail2 = aws_iam.PolicyStatement( sid='AWSLogDeliveryWrite For CloudTrail', principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')], actions=['s3:PutObject'], resources=[s3_awspath + '/*'], conditions={ 'StringEquals': { 's3:x-amz-acl': 'bucket-owner-full-control' } }) s3_log.add_to_resource_policy(bucket_policy_trail1) s3_log.add_to_resource_policy(bucket_policy_trail2) # GuardDuty bucket_policy_gd1 = aws_iam.PolicyStatement( sid='Allow GuardDuty to use the getBucketLocation operation', principals=[aws_iam.ServicePrincipal('guardduty.amazonaws.com')], actions=['s3:GetBucketLocation'], 
resources=[s3_log.bucket_arn], ) bucket_policy_gd2 = aws_iam.PolicyStatement( sid='Allow GuardDuty to upload objects to the bucket', principals=[aws_iam.ServicePrincipal('guardduty.amazonaws.com')], actions=['s3:PutObject'], resources=[s3_log.bucket_arn + '/*'], ) bucket_policy_gd5 = aws_iam.PolicyStatement( sid='Deny non-HTTPS access', effect=aws_iam.Effect.DENY, actions=['s3:*'], resources=[s3_log.bucket_arn + '/*'], conditions={'Bool': { 'aws:SecureTransport': 'false' }}) bucket_policy_gd5.add_any_principal() s3_log.add_to_resource_policy(bucket_policy_gd1) s3_log.add_to_resource_policy(bucket_policy_gd2) s3_log.add_to_resource_policy(bucket_policy_gd5) # Config bucket_policy_config1 = aws_iam.PolicyStatement( sid='AWSConfig BucketPermissionsCheck and BucketExistenceCheck', principals=[aws_iam.ServicePrincipal('config.amazonaws.com')], actions=['s3:GetBucketAcl', 's3:ListBucket'], resources=[s3_log.bucket_arn], ) bucket_policy_config2 = aws_iam.PolicyStatement( sid='AWSConfigBucketDelivery', principals=[aws_iam.ServicePrincipal('config.amazonaws.com')], actions=['s3:PutObject'], resources=[s3_awspath + '/Config/*'], conditions={ 'StringEquals': { 's3:x-amz-acl': 'bucket-owner-full-control' } }) s3_log.add_to_resource_policy(bucket_policy_config1) s3_log.add_to_resource_policy(bucket_policy_config2) # geoip bucket_policy_geo1 = aws_iam.PolicyStatement( sid='Allow geoip downloader and es-loader to read/write', principals=[lambda_es_loader.role, lambda_geo.role], actions=['s3:PutObject', 's3:GetObject', 's3:DeleteObject'], resources=[s3_geo.bucket_arn + '/*'], ) s3_geo.add_to_resource_policy(bucket_policy_geo1) # ES Snapshot bucket_policy_snapshot = aws_iam.PolicyStatement( sid='Allow ES to store snapshot', principals=[aes_siem_snapshot_role], actions=['s3:PutObject', 's3:GetObject', 's3:DeleteObject'], resources=[s3_snapshot.bucket_arn + '/*'], ) s3_snapshot.add_to_resource_policy(bucket_policy_snapshot) 
###################################################################### # for multiaccount / organizaitons ###################################################################### if org_id or no_org_ids: ################################################################## # KMS key policy for multiaccount / organizaitons ################################################################## # for CloudTrail cond_tail2 = self.make_resource_list( path='arn:aws:cloudtrail:*:', tail=':trail/*', keys=[org_mgmt_id, no_org_ids]) key_policy_mul_trail2 = aws_iam.PolicyStatement( sid=('Allow CloudTrail to encrypt logs for multiaccounts'), actions=['kms:GenerateDataKey*'], principals=[ aws_iam.ServicePrincipal('cloudtrail.amazonaws.com') ], resources=['*'], conditions={ 'StringLike': { 'kms:EncryptionContext:aws:cloudtrail:arn': cond_tail2 } }) kms_aes_siem.add_to_resource_policy(key_policy_mul_trail2) # for replicaiton key_policy_rep1 = aws_iam.PolicyStatement( sid=('Enable cross account encrypt access for S3 Cross Region ' 'Replication'), actions=['kms:Encrypt'], principals=self.make_account_plincipals( org_mgmt_id, org_member_ids, no_org_ids), resources=['*'], ) kms_aes_siem.add_to_resource_policy(key_policy_rep1) ################################################################## # Buckdet Policy for multiaccount / organizaitons ################################################################## s3_log_bucket_arn = 'arn:aws:s3:::' + s3bucket_name_log # for CloudTrail s3_mulpaths = self.make_resource_list( path=f'{s3_log_bucket_arn}/AWSLogs/', tail='/*', keys=[org_id, org_mgmt_id, no_org_ids]) bucket_policy_org_trail = aws_iam.PolicyStatement( sid='AWSCloudTrailWrite for Multiaccounts / Organizations', principals=[ aws_iam.ServicePrincipal('cloudtrail.amazonaws.com') ], actions=['s3:PutObject'], resources=s3_mulpaths, conditions={ 'StringEquals': { 's3:x-amz-acl': 'bucket-owner-full-control' } }) s3_log.add_to_resource_policy(bucket_policy_org_trail) # config s3_conf_multpaths = 
self.make_resource_list( path=f'{s3_log_bucket_arn}/AWSLogs/', tail='/Config/*', keys=[org_id, org_mgmt_id, no_org_ids]) bucket_policy_mul_config2 = aws_iam.PolicyStatement( sid='AWSConfigBucketDelivery', principals=[aws_iam.ServicePrincipal('config.amazonaws.com')], actions=['s3:PutObject'], resources=s3_conf_multpaths, conditions={ 'StringEquals': { 's3:x-amz-acl': 'bucket-owner-full-control' } }) s3_log.add_to_resource_policy(bucket_policy_mul_config2) # for replication bucket_policy_rep1 = aws_iam.PolicyStatement( sid='PolicyForDestinationBucket / Permissions on objects', principals=self.make_account_plincipals( org_mgmt_id, org_member_ids, no_org_ids), actions=[ 's3:ReplicateDelete', 's3:ReplicateObject', 's3:ReplicateTags', 's3:GetObjectVersionTagging', 's3:ObjectOwnerOverrideToBucketOwner' ], resources=[f'{s3_log_bucket_arn}/*']) bucket_policy_rep2 = aws_iam.PolicyStatement( sid='PolicyForDestinationBucket / Permissions on bucket', principals=self.make_account_plincipals( org_mgmt_id, org_member_ids, no_org_ids), actions=[ 's3:List*', 's3:GetBucketVersioning', 's3:PutBucketVersioning' ], resources=[f'{s3_log_bucket_arn}']) s3_log.add_to_resource_policy(bucket_policy_rep1) s3_log.add_to_resource_policy(bucket_policy_rep2) ###################################################################### # SNS topic for Amazon ES Alert ###################################################################### sns_topic = aws_sns.Topic(self, 'SnsTopic', topic_name='aes-siem-alert', display_name='AES SIEM') sns_topic.add_subscription( aws_sns_subscriptions.EmailSubscription( email_address=sns_email.value_as_string)) sns_topic.grant_publish(aes_siem_sns_role) ###################################################################### # output of CFn ###################################################################### kibanaurl = f'https://{es_endpoint}/_plugin/kibana/' kibanaadmin = aes_domain.get_att('kibanaadmin').to_string() kibanapass = 
aes_domain.get_att('kibanapass').to_string() core.CfnOutput(self, 'RoleDeploy', export_name='role-deploy', value=aes_siem_deploy_role_for_lambda.role_arn) core.CfnOutput(self, 'KibanaUrl', export_name='kibana-url', value=kibanaurl) core.CfnOutput(self, 'KibanaPassword', export_name='kibana-pass', value=kibanapass, description='Please change the password in Kibana ASAP') core.CfnOutput(self, 'KibanaAdmin', export_name='kibana-admin', value=kibanaadmin)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Provision the QuickSight dashboard-migration infrastructure.

    Creates two identical cross-account assume roles (source/target) for the
    migration Lambdas, an isolated VPC with Redshift/RDS interface endpoints,
    a Redshift cluster and an Aurora MySQL cluster (each with a generated
    admin-password secret), and publishes the stack configuration to SSM.

    :param scope: parent construct.
    :param id: logical id of this stack.
    """
    super().__init__(scope, id, **kwargs)
    self.current_dir = os.path.dirname(__file__)

    # NOTE(review): hard-coded trusted account id — consider promoting this
    # to a stack parameter / CDK context value.
    trusted_account_id = "499080683179"

    def _make_assume_role(name: str) -> iam.Role:
        """Lambda-assumable role with QuickSight + config-parameter access.

        The source and target roles were previously two copy-pasted,
        byte-identical definitions; this helper builds either one.
        """
        role = iam.Role(
            self, name,
            description='Role for the Quicksight dashboard migration Lambdas to assume',
            role_name=name,
            max_session_duration=core.Duration.seconds(3600),
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
            inline_policies={
                'AllowAccess': iam.PolicyDocument(
                    statements=[
                        iam.PolicyStatement(
                            effect=iam.Effect.ALLOW,
                            actions=[
                                "quicksight:*",
                            ],
                            resources=["*"]
                        ),
                        iam.PolicyStatement(
                            effect=iam.Effect.ALLOW,
                            actions=[
                                "ssm:GetParameter",
                            ],
                            resources=["arn:aws:ssm:*:*:parameter/infra/config"]
                        )
                    ]
                )
            }
        )
        # Additionally allow the trusted account to assume the role
        # cross-account (on top of the Lambda service principal above).
        role.assume_role_policy.add_statements(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=['sts:AssumeRole'],
                principals=[iam.AccountPrincipal(trusted_account_id)]
            )
        )
        return role

    self.quicksight_migration_source_assume_role = _make_assume_role(
        'quicksight-migration-source-assume-role')
    self.quicksight_migration_target_assume_role = _make_assume_role(
        'quicksight-migration-target-assume-role')

    # Isolated VPC (no NAT / IGW) hosting the data sources.
    self.vpc = ec2.Vpc(
        self, "VPC",
        cidr="10.0.0.0/21",
        max_azs=3,
        subnet_configuration=[
            ec2.SubnetConfiguration(
                cidr_mask=28,
                name="Database",
                subnet_type=ec2.SubnetType.ISOLATED,
            )
        ]
    )
    # Interface endpoints so the isolated subnets can reach the service APIs.
    self.vpc.add_interface_endpoint(
        "redshift_endpoint",
        service=ec2.InterfaceVpcEndpointAwsService("redshift"))
    self.vpc.add_interface_endpoint(
        "rds_endpoint",
        service=ec2.InterfaceVpcEndpointAwsService("rds"))

    def _make_admin_secret(construct_id: str, secret_name: str,
                           description: str) -> secrets.Secret:
        """Secret with a generated 32-char admin password under key 'password'."""
        return secrets.Secret(
            self, construct_id,
            secret_name=secret_name,
            description=description,
            generate_secret_string=secrets.SecretStringGenerator(
                secret_string_template='{"username": "******"}',
                generate_string_key='password',
                password_length=32,
                # Was '"@\\\/' — contains the invalid escape \/ (deprecated).
                # '"@\\\\/' yields the identical runtime characters: " @ \ \ /
                # (redundant with exclude_punctuation=True, kept for parity).
                exclude_characters='"@\\\\/',
                exclude_punctuation=True
            )
        )

    self.redshift_secret = _make_admin_secret(
        'redshift-admin', 'redshift-admin',
        "This secret has generated admin secret password for Redshift cluster")
    self.redshift_cluster = redshift.Cluster(
        self, "datasource-redshift",
        master_user=redshift.Login(
            master_username="******",
            master_password=self.redshift_secret.secret_value_from_json('password')
        ),
        vpc=self.vpc,
        vpc_subnets=ec2.SubnetSelection(
            subnet_type=ec2.SubnetType.ISOLATED
        )
    )

    self.rds_secret = _make_admin_secret(
        'rds-admin', 'rds-admin',
        "This secret has generated admin secret password for RDS cluster")
    self.rds_cluster = rds.DatabaseCluster(
        self, "datasource-rds",
        engine=rds.DatabaseClusterEngine.aurora_mysql(
            version=rds.AuroraMysqlEngineVersion.VER_2_08_1),
        instance_props={
            "vpc_subnets": {
                "subnet_type": ec2.SubnetType.ISOLATED
            },
            "vpc": self.vpc
        },
        credentials=rds.Credentials.from_secret(self.rds_secret)
    )

    # Publish the stack configuration so the migration Lambdas can read it
    # (matches the /infra/config resource granted in the roles above).
    ssm.StringParameter(
        self, 'InfraConfigParam',
        parameter_name='/infra/config',
        string_value=json.dumps(self.to_dict()))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """IAM users, groups and roles demo stack.

    Creates user1 with a Secrets Manager generated console password, a group
    granting S3 read-only access plus scoped SSM-parameter access, an ops
    role with an EC2/CloudWatch describe policy, and outputs the account
    console login URL.

    :param scope: parent construct.
    :param id: logical id of this stack.
    """
    super().__init__(scope, id, **kwargs)

    # Secrets Manager generated password for user1
    user1_pass = _secretsmanager.Secret(self, "user1Pass",
                                        description="Password for User1",
                                        secret_name="user1_pass")

    # IAM user with the SecretsManager-backed console password
    user1 = _iam.User(self, "user1",
                      password=user1_pass.secret_value,
                      user_name="user1")

    # IAM group; user1 becomes a member
    konstone_group = _iam.Group(self, "konStoneGroup",
                                group_name="konstone_group")
    konstone_group.add_user(user1)

    # AWS-managed read-only S3 policy attached to the group
    konstone_group.add_managed_policy(
        _iam.ManagedPolicy.from_aws_managed_policy_name(
            "AmazonS3ReadOnlyAccess"))

    # SSM parameters; the group is granted read on parameter1 only —
    # parameter2 deliberately stays inaccessible to demonstrate scoping.
    param1 = _ssm.StringParameter(self, "parameter1",
                                  description="Keys To KonStone",
                                  parameter_name="/konstone/keys/fish",
                                  string_value="130481",
                                  tier=_ssm.ParameterTier.STANDARD)
    _ssm.StringParameter(
        self, "parameter2",
        description="Keys To KonStone",
        parameter_name="/konstone/keys/fish/gold",
        string_value="130482",
        tier=_ssm.ParameterTier.STANDARD)
    param1.grant_read(konstone_group)

    # Allow the group to list all SSM parameters in the console.
    # sid is the policy statement ID (it was previously assigned after
    # construction and mislabeled a "description"); pass it directly.
    grp_stmt1 = _iam.PolicyStatement(
        sid="DescribeAllParametersInConsole",
        effect=_iam.Effect.ALLOW,
        resources=["*"],
        actions=["ssm:DescribeParameters"])
    konstone_group.add_to_policy(grp_stmt1)

    # Role assumable by principals within this account
    konstone_ops_role = _iam.Role(
        self, 'konstoneOpsRole',
        assumed_by=_iam.AccountPrincipal(f"{core.Aws.ACCOUNT_ID}"),
        role_name="konstone_ops_role")

    # Customer-managed describe-only policy attached to the role
    # (description typo fixed: "isntances" -> "instances")
    _iam.ManagedPolicy(
        self, "listEc2Instances",
        description="list ec2 instances in the account",
        managed_policy_name="list_ec2_policy",
        statements=[
            _iam.PolicyStatement(effect=_iam.Effect.ALLOW,
                                 actions=[
                                     "ec2:Describe*",
                                     "cloudwatch:Describe*",
                                     "cloudwatch:Get*"
                                 ],
                                 resources=["*"])
        ],
        roles=[konstone_ops_role])

    # Console login URL for user1
    core.CfnOutput(
        self, "user1LoginUrl",
        description="LoginUrl for User1",
        value=f"https://{core.Aws.ACCOUNT_ID}.signin.aws.amazon.com/console"
    )