def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Provision the demo resources: two S3 buckets, an IAM group, and an output."""
    super().__init__(scope, id, **kwargs)

    # Fully configured, locked-down bucket (construct handle not retained).
    _s3.Bucket(
        self,
        "mybucketid",
        bucket_name="my1stcdkbucket2020",
        versioned=False,
        encryption=_s3.BucketEncryption.S3_MANAGED,
        block_public_access=_s3.BlockPublicAccess.BLOCK_ALL,
    )

    # Second bucket with all defaults; its generated name is exported below.
    default_bucket = _s3.Bucket(
        self,
        "mybucketid1",
    )

    bucket_output = core.CfnOutput(
        self,
        "mybucketOuput1",
        value=default_bucket.bucket_name,
        description="my first cdk bucket",
        export_name="mybucketOuput1",
    )

    _iam.Group(self, "cdkgid")

    # Guard: reject over-long concrete names (skipped for unresolved CDK tokens).
    topic_name = "abczys1234"
    if not core.Token.is_unresolved(topic_name) and len(topic_name) > 10:
        raise ValueError("Maximum value can be only 10 characters")

    # Synth-time trace; resolves to a token until deployment.
    print(default_bucket.bucket_name)
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Create two demo S3 buckets, export one bucket's name, and add an IAM group."""
    super().__init__(scope, construct_id, **kwargs)

    # Explicitly configured bucket: unversioned, S3-managed encryption, no public access.
    _s3.Bucket(
        self,  # Logical identifier
        "VijayBucketID",
        bucket_name="vijay-cdk-testing",
        versioned=False,
        encryption=_s3.BucketEncryption.S3_MANAGED,
        block_public_access=_s3.BlockPublicAccess.BLOCK_ALL,
    )

    # Default-configured bucket whose generated name is exported below.
    mybucket = _s3.Bucket(
        self,
        "VijayBucketID-1",
    )

    output_1 = core.CfnOutput(
        self,
        "VijayBucketOutput1",
        value=mybucket.bucket_name,
        # Plain string: the original used an f-string with no placeholders (ruff F541).
        description="My First CDK Bucket",
        export_name="VijayBucketOutput1",
    )

    _iam.Group(self, "VijayGroupID")
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    """Create two S3 buckets, an IAM group, and export the second bucket's name."""
    super().__init__(scope, construct_id, **kwargs)

    # Hardened bucket: KMS-managed encryption and all public access blocked.
    # (Handle not needed afterwards, so the unused local binding was dropped.)
    _s3.Bucket(
        self,
        "myBucketId",
        bucket_name="dvpbucket512",
        versioned=False,
        encryption=_s3.BucketEncryption.KMS_MANAGED,
        block_public_access=_s3.BlockPublicAccess.BLOCK_ALL,
    )

    # Default bucket; its auto-generated name is exported below.
    bucketvar = _s3.Bucket(self, "bucketvar")

    _iam.Group(self, "dvpgroup", group_name="dvpgroup")

    buckoutput = core.CfnOutput(
        self,
        "FirstBucketOutput",
        value=bucketvar.bucket_name,
        # Plain string: original was an f-string with no placeholders (ruff F541).
        description="My first bucket output",
        export_name="FirstBucketOpt",
    )
    # Synth-time trace of the export name.
    print(buckoutput.export_name)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Create two demo S3 buckets, export one bucket name, and add an IAM group."""
    super().__init__(scope, id, **kwargs)

    # Fully configured bucket: unversioned, S3-managed encryption, no public access.
    mybucket0 = _s3.Bucket(
        self,
        "myBucketId",
        bucket_name="first-cdk-project-1111",
        versioned=False,
        encryption=_s3.BucketEncryption.S3_MANAGED,
        block_public_access=_s3.BlockPublicAccess.BLOCK_ALL,
    )
    # Printed at synth time; resolves to a token until deployment.
    print(f"bucket name - {mybucket0.bucket_name}")

    # Default bucket whose generated name is exported below.
    # (A commented-out topic-name length check was removed as dead code.)
    mybucket = _s3.Bucket(self, "myBucketId1")
    output1 = core.CfnOutput(
        self,
        "myBucketOutput1",
        value=mybucket.bucket_name,
        # Plain string: original was an f-string with no placeholders (ruff F541).
        description="My First CDK Bucket",
        export_name="myBucketOutput1",
    )
    _iam.Group(self, "gid", group_name="test-group")
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Create two S3 buckets, an IAM group, and export the second bucket's name."""
    super().__init__(scope, construct_id, **kwargs)

    # Fully configured bucket: unversioned, S3-managed encryption, no public access.
    aws_s3.Bucket(
        self,
        "awscdk-infra",
        bucket_name="awscdk-infra-manuja",
        versioned=False,
        encryption=aws_s3.BucketEncryption.S3_MANAGED,
        block_public_access=aws_s3.BlockPublicAccess.BLOCK_ALL,
    )

    # Default bucket whose generated name is exported below.
    mybucket = aws_s3.Bucket(self, "awscdk-infra-2")

    aws_iam.Group(self, "gid")

    output_1 = core.CfnOutput(
        self,
        "MyBucketOutput1",
        value=mybucket.bucket_name,
        # Plain string: original was an f-string with no placeholders (ruff F541).
        description="Test CDK Bucket",
        export_name="MyBucketOutput1",
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Set up the SageMaker workshop: an execution role, a participants group
    with scoped permissions, read access to the shared data bucket, and the
    participant users themselves."""
    super().__init__(scope, id, **kwargs)

    sagemaker_principal = iam.ServicePrincipal('sagemaker.amazonaws.com')

    # Execution role SageMaker assumes on behalf of workshop notebooks.
    workshop_role = iam.Role(
        self,
        'WorkshopRole',
        assumed_by=sagemaker_principal,
        role_name='amazon-sagemaker-in-practice-workshop-role',
    )
    workshop_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'arn:aws:iam::aws:policy/AmazonSageMakerFullAccess'))

    participants_group = iam.Group(self, 'WorkshopParticipantsGroup')
    policy = iam.Policy(self, 'WorkshopParticipantsPolicy')

    # Broad allow-statement covering the services participants need.
    allowed_actions = [
        "sagemaker:*",
        "ecr:*",
        "cloudwatch:*",
        "logs:*",
        "s3:GetBucketLocation",
        "s3:ListAllMyBuckets",
        "iam:ListRoles",
        "iam:GetRole",
    ]
    default_statement = iam.PolicyStatement(effect=iam.Effect.ALLOW)
    default_statement.add_all_resources()
    default_statement.add_actions(*allowed_actions)

    # Participants may pass roles only to the SageMaker service.
    pass_role_statement = iam.PolicyStatement(effect=iam.Effect.ALLOW)
    pass_role_statement.add_all_resources()
    pass_role_statement.add_actions("iam:PassRole")
    pass_role_statement.add_condition(
        "StringEquals",
        {'iam:PassedToService': sagemaker_principal.to_string()},
    )

    policy.add_statements(default_statement, pass_role_statement)
    participants_group.attach_inline_policy(policy)

    # Read-only access to the pre-existing workshop data bucket.
    data_source = s3.Bucket.from_bucket_arn(
        self, 'DataSourceBucket', 'arn:aws:s3:::existing-bucket-for-workshop')
    data_source.grant_read(participants_group)

    # Create the requested number of participant users.
    env = kwargs.get("env")
    ParticipantsConstruct(
        self,
        "WorkshopParticipantsConstruct",
        num=env.get("participants_count"),
        password=env.get("password"),
        group=participants_group,
    )
def __init__(self, scope: Construct, id: str, **kwargs) -> None:
    """Wire up an image-classification pipeline: S3 uploads trigger a Lambda
    that calls Rekognition and records the labels in DynamoDB."""
    super().__init__(scope, id, **kwargs)

    # IAM principals: a group plus a user that belongs to it.
    group = iam.Group(self, "RekGroup")
    user = iam.User(self, "RekUser")
    user.add_to_group(group)

    # Bucket holding the images; the user may read and write it.
    bucket = s3.Bucket(self, 'Bucket')
    bucket.grant_read_write(user)

    # Table keyed by image name, holding Rekognition results.
    table = ddb.Table(
        self,
        'Classifications',
        partition_key={'name': 'image_name', 'type': ddb.AttributeType.STRING},
    )

    # Lambda running the detection; bucket/table names arrive via environment.
    lambda_function = _lambda.Function(
        self,
        'RekFunction',
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler='rekfunction.handler',
        code=_lambda.Code.from_asset('rekognition_lambda_s3_trigger/lambda'),
        environment={
            'BUCKET_NAME': bucket.bucket_name,
            'TABLE_NAME': table.table_name,
        },
    )

    # Allow the function to call Rekognition label detection on any resource.
    rek_statement = iam.PolicyStatement()
    rek_statement.add_actions("rekognition:DetectLabels")
    rek_statement.add_resources("*")
    lambda_function.add_to_role_policy(rek_statement)

    # Fire the Lambda for each newly created image, one filter per extension.
    notification = s3_notifications.LambdaDestination(lambda_function)
    notification.bind(self, bucket)
    for extension in ('.jpg', '.jpeg', '.png'):
        bucket.add_object_created_notification(
            notification, s3.NotificationKeyFilter(suffix=extension))

    # Data-plane permissions for the function itself.
    table.grant_read_write_data(lambda_function)
    bucket.grant_read_write(lambda_function)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Create the HandsOnCdk IAM group, its users, a PowerUser role, and policies.

    Users, MFA-restriction statements, and switch-role statements are read
    from configuration helpers defined elsewhere in this module.
    """
    # Bug fix: the original called super().__init__(scope, id) and silently
    # dropped **kwargs (e.g. `env`), so stack options never reached the parent.
    super().__init__(scope, id, **kwargs)

    # IAM Group
    hands_on_cdk_developers = iam.Group(self, "HandsOnCdkDevelopers",
                                        group_name="HandsOnCdkDevelopers")
    groups_map = {"HandsOnCdk": hands_on_cdk_developers}

    # IAM User: one per configured entry, each placed into its mapped group.
    for user in read_config_users():
        iam.User(
            self,
            f"{user['group']}Developer-{user['first_name']}{user['last_name']}",
            user_name=
            f"{user['group']}Developer-{user['first_name']}{user['last_name']}",
            password=core.SecretValue.plain_text(user['password']),
            groups=[groups_map[user["group"]]])

    # IAM Role: assumable by this account, granting PowerUserAccess.
    iam.Role(self, "HandsOnCdkDevelopers-Role-PowerUserAccess",
             role_name="HandsOnCdkDevelopers-Role-PowerUserAccess",
             assumed_by=iam.AccountPrincipal(
                 core.ScopedAws(scope).account_id),
             managed_policies=[
                 iam.ManagedPolicy.from_aws_managed_policy_name(
                     "PowerUserAccess")
             ])

    # IAM Policies attached to the group; statements come straight from config
    # (identity list-comprehensions replaced by list() — ruff C416).
    iam.Policy(
        self,
        "HandsOnCdkDevelopers-Policy-SourceMfaRestriction",
        policy_name="HandsOnCdkDevelopers-Policy-SourceMfaRestriction",
        force=True,
        groups=[hands_on_cdk_developers],
        statements=list(read_config_source_mfa_restriction()))
    iam.Policy(self, "HandsOnCdkDevelopers-Policy-OnlySwitchRole",
               policy_name="HandsOnCdkDevelopers-Policy-OnlySwitchRole",
               force=True,
               groups=[hands_on_cdk_developers],
               statements=list(read_config_only_switch_role()))
def init_bot_group(self):
    """Create and return the minebot IAM group, attaching a managed policy
    that permits ECS service inspection/updates and ENI description."""
    allowed_actions = (
        "ecs:ListClusters",
        "ecs:ListServices",
        "ecs:DescribeServices",
        "ecs:ListTasks",
        "ecs:DescribeTasks",
        "ecs:UpdateService",
        "ec2:DescribeNetworkInterfaces",
    )
    allow = iam.PolicyStatement(effect=iam.Effect.ALLOW)
    allow.add_actions(*allowed_actions)
    allow.add_all_resources()

    bot_group = iam.Group(self, "minebot-group")
    bot_group.add_managed_policy(
        iam.ManagedPolicy(self, "minebot-start-stop-policy",
                          statements=[allow]))
    return bot_group
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    """Provision workshop users.

    Creates a demo bucket, an IAM group with Cloud9 permissions, then one IAM
    user plus a Cloud9 environment per participant, writing the generated
    credentials to ``output.csv`` at synth time.
    """
    super().__init__(scope, construct_id, **kwargs)

    bucket = s3.Bucket(
        self,
        "MyFirstBucket",
        versioned=True,
    )

    # Group carrying the AWSCloud9User managed policy plus an inline statement
    # built from the module-level `permissions` list.
    statement = iam.PolicyStatement(actions=permissions, resources=["*"])
    group = iam.Group(
        self,
        'test_group',
        managed_policies=[
            iam.ManagedPolicy.from_managed_policy_arn(
                self,
                'AWSCloud9User',
                managed_policy_arn="arn:aws:iam::aws:policy/AWSCloud9User")
        ])
    group.add_to_policy(statement)

    accounts = []
    for index in range(number_of_users):
        account = {
            'password': get_random_string(password_length),
            'user': f"user-{index}",
        }
        workshop_user = iam.User(
            self,
            account['user'],
            groups=[group],
            password=cdk.SecretValue.plain_text(account['password']))
        # One Cloud9 environment per user, auto-stopping after 30 idle minutes.
        c9.CfnEnvironmentEC2(
            self,
            f"cloud9_env_{index}",
            instance_type='t2.micro',
            automatic_stop_time_minutes=30,
            owner_arn=workshop_user.user_arn)
        accounts.append(account)
        # NOTE(review): the output id says ":arn" but the value is the user
        # *name* — confirm which was intended.
        cdk.CfnOutput(self, f"user-{index}:arn", value=workshop_user.user_name)

    # Persist credentials for distribution to participants.
    with open('output.csv', "w") as f:
        writer = csv.DictWriter(f, accounts[0].keys())
        writer.writeheader()
        writer.writerows(accounts)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Create a versioned, KMS-encrypted bucket, export its name, and add a group."""
    super().__init__(scope, id, **kwargs)

    demo_bucket = _s3.Bucket(
        self,
        "Mybucketid",
        versioned=True,
        encryption=_s3.BucketEncryption.KMS_MANAGED,
    )
    # Return value intentionally discarded — presumably a URL-format demo;
    # TODO(review): confirm this call is still wanted.
    demo_bucket.s3_url_for_object(key=None)

    bucket_output = core.CfnOutput(
        self,
        "MyBucketOutput",
        value=demo_bucket.bucket_name,
        description="My first cdk Bucket",
        export_name="MyBucketOutput",
    )

    _iam.Group(self, "gid")
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Create two buckets, an IAM group, and export the second bucket's name."""
    super().__init__(scope, id, **kwargs)

    # Hardened bucket: versioned, KMS-managed keys, all public access blocked.
    _s3.Bucket(
        self,
        "myBucketId",
        versioned=True,
        encryption=_s3.BucketEncryption.KMS_MANAGED,
        block_public_access=_s3.BlockPublicAccess.BLOCK_ALL,
    )

    # Default bucket whose generated name is exported below.
    export_bucket = _s3.Bucket(self, "outputbucket")

    _iam.Group(self, "gid")

    output_1 = core.CfnOutput(
        self,
        "output1",
        value=export_bucket.bucket_name,
        description="my first output",
        export_name="outputbucket1",
    )
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    """Create two auto-destroyable S3 buckets, an IAM group, and a name export."""
    super().__init__(scope, construct_id, **kwargs)

    # First S3 bucket. RemovalPolicy.DESTROY deletes the bucket on
    # "cdk destroy" only if the bucket is empty.
    _s3.Bucket(
        self,
        "myBucketId",
        bucket_name="my-first-cdk-project-luber",
        versioned=True,
        encryption=_s3.BucketEncryption.S3_MANAGED,
        block_public_access=_s3.BlockPublicAccess.BLOCK_ALL,
        removal_policy=cdk.RemovalPolicy.DESTROY,
    )

    # Second S3 bucket, same removal policy.
    my_bucket = _s3.Bucket(
        self,
        "myBucketId1",
        bucket_name="my-first-cdk-project-luber-1",
        removal_policy=cdk.RemovalPolicy.DESTROY,
    )

    # Guard: reject over-long concrete names (skipped for unresolved CDK tokens).
    snstopicname = "abczys"
    if not cdk.Token.is_unresolved(snstopicname) and len(snstopicname) > 10:
        raise ValueError("Maximum value can be only 10 characters")

    # Create an IAM group.
    _iam.Group(self, "gid", group_name="MyCDKAdmin")

    output_1 = cdk.CfnOutput(
        self,
        "myBucketOutput1",
        value=my_bucket.bucket_name,
        # Plain string: original was an f-string with no placeholders (ruff F541).
        description="My first CDK Bucket",
        export_name="myBucketOutput1",
    )
def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc, **kwargs) -> None:
    """Launch one SSH-reachable bastion host per BASTION subnet and create an
    admin group allowed to push SSH public keys via EC2 Instance Connect."""
    super().__init__(scope, id, **kwargs)

    # AMI: latest Amazon Linux 2 HVM image with general-purpose storage.
    amzn_linux = ec2.MachineImage.latest_amazon_linux(
        generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
        edition=ec2.AmazonLinuxEdition.STANDARD,
        virtualization=ec2.AmazonLinuxVirt.HVM,
        storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE,
    )

    # One bastion per AZ of the BASTION subnet group, open to SSH from anywhere.
    bastion_subnets = vpc.stack.vpc.select_subnets(
        subnet_group_name="BASTION").subnets
    for index, subnet in enumerate(bastion_subnets, start=1):
        bastion_host = ec2.BastionHostLinux(
            self,
            f"ec2-BASTION-Instance{index}",
            vpc=vpc,
            subnet_selection=ec2.SubnetSelection(
                availability_zones=[subnet.availability_zone],
                subnet_group_name="BASTION",
            ),
            instance_type=ec2.InstanceType("t1.micro"),
            machine_image=amzn_linux,
        )
        bastion_host.allow_ssh_access_from(ec2.Peer.any_ipv4())

    # Admins may send SSH public keys to any instance in this account/region,
    # restricted to the given OS user.
    host_admin_group = iam.Group(self, "HostAdmins")
    admin_policy = iam.Policy(self, "HostAdminPolicy",
                              groups=[host_admin_group])
    admin_policy.add_statements(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["ec2-instance-connect:SendSSHPublicKey"],
            resources=[f"arn:aws:ec2:{self.region}:{self.account}:instance/*"],
            conditions={"StringEquals": {"ec2:osuser": "******"}},
        ))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Create two demo S3 buckets, an IAM group, and export one bucket name."""
    super().__init__(scope, id, **kwargs)

    # Fully configured bucket: unversioned, S3-managed encryption, no public access.
    _s3.Bucket(
        self,
        "myBucketId",
        bucket_name="myfirstcdkproject982",
        versioned=False,
        encryption=_s3.BucketEncryption.S3_MANAGED,
        block_public_access=_s3.BlockPublicAccess.BLOCK_ALL,
    )

    # Default bucket whose generated name is printed (synth time) and exported.
    mybucket = _s3.Bucket(self, "myBucketId1")
    print(mybucket.bucket_name)

    _iam.Group(self, "gid")

    output_1 = core.CfnOutput(
        self,
        "myBucketOutput1",
        value=mybucket.bucket_name,
        # Plain string: original was an f-string with no placeholders (ruff F541).
        description="My first CDK Bucket",
        export_name="myBucketOutput1",
    )
def __init__(self, scope: core.Construct, id: str, ** kwargs) -> None:
    """Create two IAM users (one with a Secrets Manager password, one with a
    literal password), a group containing the second user, and a login-URL output."""
    super().__init__(scope, id, **kwargs)

    # Secrets Manager secret backing user1's password.
    user1_secret = _secretsmanager.Secret(
        self,
        "user1Pass",
        description="Password for User1",
        secret_name="user1_pass",
    )

    # User1 authenticates with the generated secret value.
    first_user = _iam.User(
        self,
        "user1",
        password=user1_secret.secret_value,
        user_name="user1",
    )

    # User2 uses a literal password (demo only — avoid in real stacks).
    second_user = _iam.User(
        self,
        "user2",
        password=core.SecretValue.plain_text("Dont-Use-B@d-Passw0rds"),
        user_name="user2",
    )

    # Group holding user2.
    user_group = _iam.Group(
        self,
        "konStoneGroup",
        group_name="konstone_group",
    )
    user_group.add_user(second_user)

    # Console sign-in URL for this account.
    login_url_output = core.CfnOutput(
        self,
        "user2LoginUrl",
        description="LoginUrl for User2",
        value=f"https://{core.Aws.ACCOUNT_ID}.signin.aws.amazon.com/console",
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Create an IAM group and an S3 bucket, granting the group read/write access."""
    super().__init__(scope, id, **kwargs)
    self.group = aws_iam.Group(self, "Group")
    self.s3_bucket = aws_s3.Bucket(self, "Bucket")
    # Bug fix: the original referenced the bare name `group`, which is
    # undefined here (NameError at synth time); the attribute is `self.group`.
    self.s3_bucket.grant_read_write(self.group)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Monitor CloudTrail access: an EventBridge rule feeds a response Lambda
    that alerts via Slack/SNS; a deny policy and IAM group are provisioned
    for containment."""
    super().__init__(scope, id, **kwargs)

    notify_email = self.node.try_get_context("notify_email")
    slack_webhook_url = self.node.try_get_context("webhook_url")
    white_list_group = self.node.try_get_context("white_list_group")

    if not (notify_email and slack_webhook_url and white_list_group):
        logger.error(
            f"Required context variables for {id} were not provided!")
    else:
        # 1. Response Lambda; Slack webhook and whitelist arrive via environment.
        lambda_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks",
                                       "in_clt_01")
        response_lambda = _lambda.Function(
            self,
            "InClt01ResponseFunction",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="clUnauthAccessResponse.lambda_handler",
            code=_lambda.Code.from_asset(lambda_dir_path),
            function_name="InClt01ResponseFunction",
            environment={
                "webhook_url": slack_webhook_url,
                "white_list_group": white_list_group,
            })

        # 2. Rule tracking CloudTrail events.
        rule = events.Rule(
            self,
            "cdkRule",
            description='Rule created by CDK for monitoring CloudTrail access',
            enabled=True,
            rule_name="CltAccessRule",
            event_pattern={"source": ["aws.cloudtrail"]})

        # 3. IAM / Organizations read permissions for the Lambda role.
        response_lambda.add_to_role_policy(
            iam.PolicyStatement(
                actions=[
                    "iam:*",
                    "organizations:DescribeAccount",
                    "organizations:DescribeOrganization",
                    "organizations:DescribeOrganizationalUnit",
                    "organizations:DescribePolicy",
                    "organizations:ListChildren",
                    "organizations:ListParents",
                    "organizations:ListPoliciesForTarget",
                    "organizations:ListRoots",
                    "organizations:ListPolicies",
                    "organizations:ListTargetsForPolicy",
                ],
                effect=iam.Effect.ALLOW,
                resources=["*"],
            ))

        # 4. Permission to send SNS notifications.
        response_lambda.add_to_role_policy(
            iam.PolicyStatement(
                actions=["sns:*"],
                effect=iam.Effect.ALLOW,
                resources=["*"],
            ))

        # 5. Lambda as the rule's target.
        rule.add_target(event_target.LambdaFunction(response_lambda))

        # 6. SNS topic with an email subscription for notifications.
        topic = sns.Topic(self, "CLTAccessCDK", topic_name="CLTAccessCDK")
        topic.add_subscription(subs.EmailSubscription(notify_email))

        # 7. Managed policy explicitly denying all CloudTrail actions.
        clt_deny_access_policy = iam.ManagedPolicy(
            self,
            "InCLT01DenyPolicy",
            managed_policy_name="CltDenyAccess",
            statements=[
                iam.PolicyStatement(effect=iam.Effect.DENY,
                                    actions=["cloudtrail:*"],
                                    resources=["*"])
            ])

        # 8. IAM group for CloudTrail access management.
        clt_access_group = iam.Group(self,
                                     "cltAccessGroup",
                                     group_name="cltAccessGroup")
def __init__( self, scope: core.Construct, id: str, host_name, cert_arn, zone_id, admin_user: str, admin_password: str, cloud9_instance_size: str, participant_limit: str, **kwargs, ) -> None: super().__init__(scope, id, **kwargs) stack = core.Stack.of(self) stack.template_options.description = "Connected Drink Dispenser Workshop" # Static Website props: StaticSiteProps = StaticSiteProps( fqdn=host_name, hosted_zone_id=zone_id, certificate_arn=cert_arn, error_configuration=[ { "error_code": 403, "error_caching_min_ttl": 300, "response_code": 200, "response_page_path": "/index.html", }, { "error_code": 404, "error_caching_min_ttl": 300, "response_code": 200, "response_page_path": "/index.html", }, ], output_name="CDDWebSite", ) cdd_site = StaticSiteConstruct(self, "StaticSite", props) # Custom resource to clean out static website bucket prior to delete # TODO: Move this to the StaticSiteConstruct as option props: CustomResourceProps = CustomResourceProps( name=id + "-CR-S3DeleteObjects", lambda_directory="./lambda_functions/cr_s3_delete", handler="index.main", timeout=30, runtime=lambda_.Runtime.PYTHON_3_7, environment={"BUCKET_NAME": cdd_site.bucket_name}, ) s3_delete_cr = CustomResourceConstruct(self, "EmptyCddS3Bucket", props) # DependsOn the bucket (we need to delete objects before the bucket is deleted) s3_delete_cr.resource.node.add_dependency(cdd_site.bucket_resource) policy_statement = iam.PolicyStatement() policy_statement.add_actions("s3:GetBucket*") policy_statement.add_actions("s3:GetObject*") policy_statement.add_actions("s3:DeleteObject*") policy_statement.add_actions("s3:List*") policy_statement.add_resources(cdd_site.bucket_resource.bucket_arn) policy_statement.add_resources( f"{cdd_site.bucket_resource.bucket_arn}/*") s3_delete_cr.add_policy_to_role(policy_statement) # IAM Constructs user_group = iam.Group( self, "UserGroup", group_name=id + "-CDDUserGroup", managed_policies=[ iam.ManagedPolicy.from_aws_managed_policy_name( "ReadOnlyAccess") ], ) # 
DynamoDB tables user_db = dynamodb.Table( # UserId as key, user "admin" tracks next available dispenser id # No access to users, RW to Cognito Lambda self, "UserTable", table_name=id + "-UserTable", partition_key={ "name": "userName", "type": dynamodb.AttributeType.STRING }, billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST, removal_policy=core.RemovalPolicy.DESTROY, ) dispenser_db = dynamodb.Table( # Dispenser ID and credit amount - RO to users, RW to APIs self, "DispenserTable", table_name=id + "-DispenserTable", partition_key={ "name": "dispenserId", "type": dynamodb.AttributeType.STRING, }, billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST, removal_policy=core.RemovalPolicy.DESTROY, ) dispenser_events = dynamodb.Table( # Recorded events from dispenser actions self, "DispenserEvents", table_name=id + "-DispenserEvents", partition_key={ "name": "dispenserId", "type": dynamodb.AttributeType.STRING, }, sort_key={ "name": "timestamp", "type": dynamodb.AttributeType.STRING }, billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST, removal_policy=core.RemovalPolicy.DESTROY, ) # Cognito Resources # User pool with phone_number as username props: CognitoUserPoolProps = CognitoUserPoolProps( user_pool_name=id + "-users", client_name=id + "-webclient", auto_verified_attributes=["phone_number"], schema=[ { "name": "group", "attributeDataType": "String", "mutable": True, "required": False, }, { "name": "dispenserId", "attributeDataType": "String", "mutable": True, "required": False, }, ], policies={ "passwordPolicy": { "minimumLength": 6, "requireLowercase": True, "requireNumbers": True, "requireSymbols": False, "requireUppercase": False, } }, ) user_pool = CognitoUserPoolConstruct(self, "UserPool", props) # Role and lambda triggers lambda_cognito_access_role = iam.Role( # Access to IDP calls (for triggers) self, "LambdaCognitoAccessRole", assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"), inline_policies=[ iam.PolicyDocument(statements=[ iam.PolicyStatement( actions=[ 
"logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents", ], resources=["arn:aws:logs:*:*:*"], ), iam.PolicyStatement(actions=["cognito-idp:*"], resources=["*"]), iam.PolicyStatement(actions=["dynamodb:*"], resources=["*"]), ]) ], ) # Triggers for UserPool # Pre-sign-up: triggered when username, password, and phone number submitted lambda_cognito_trigger_pre_signup = lambda_.Function( self, "CogntioTriggerPreSignUp", function_name=id + "-CogntioTriggerPreSignUp", code=lambda_.AssetCode("./lambda_functions/cog_pre_signup"), handler="lambda.handler", runtime=lambda_.Runtime.PYTHON_3_7, role=lambda_cognito_access_role, timeout=core.Duration.seconds(6), environment={ "USER_TABLE": user_db.table_name, "PARTICIPANT_LIMIT": participant_limit, }, ) lambda_cognito_trigger_pre_signup.add_permission( "AllowCognitoPreSign", principal=iam.ServicePrincipal("cognito-idp.amazonaws.com"), source_arn=user_pool.user_pool_arn, ) # Post confirmation: triggered after validation code provided lambda_cognito_trigger_post_confirm = lambda_.Function( self, "CogntioTriggerPostConfirm", function_name=id + "-CogntioTriggerPostConfirm", code=lambda_.AssetCode("./lambda_functions/cog_post_confirm"), handler="lambda.handler", runtime=lambda_.Runtime.PYTHON_3_7, role=lambda_cognito_access_role, timeout=core.Duration.seconds(6), environment={ "USER_TABLE": user_db.table_name, "PARTICIPANT_LIMIT": participant_limit, }, ) lambda_cognito_trigger_post_confirm.add_permission( "AllowCognitoPostConfirm", principal=iam.ServicePrincipal("cognito-idp.amazonaws.com"), source_arn=user_pool.user_pool_arn, ) # Attach triggers to pool user_pool.user_pool.lambda_config = cognito.CfnUserPool.LambdaConfigProperty( pre_sign_up=lambda_cognito_trigger_pre_signup.function_arn, post_confirmation=lambda_cognito_trigger_post_confirm.function_arn, ) cognito.CfnUserPoolGroup( self, "UserPoolCDDUser", group_name="cdd_user", description="General users of CDD (participants)", user_pool_id=user_pool.user_pool_id, ) 
cognito.CfnUserPoolGroup( self, "UserPoolCDDAdmin", group_name="cdd_admin", description="CDD administrators", user_pool_id=user_pool.user_pool_id, ) identity_pool = cognito.CfnIdentityPool( self, "IdentityPool", identity_pool_name=id.replace("-", "") + "_idpool", allow_unauthenticated_identities=False, cognito_identity_providers=[{ "clientId": user_pool.client_id, "providerName": user_pool.provider_name, }], ) core.CfnOutput( self, "CognitoIdentityPoolId", export_name="CognitoIdentityPoolId", value=identity_pool.ref, ) # Custom resource to create admin user - cannot do via CFn to set password props: CustomResourceProps = CustomResourceProps( name=id + "-CR-CreateCognitoAdminUser", lambda_directory="./lambda_functions/cr_create_admin_user", handler="index.main", timeout=30, runtime=lambda_.Runtime.PYTHON_3_7, environment={ "COGNITO_USER_POOL_ID": user_pool.user_pool_id, "COGNITO_CLIENT_ID": user_pool.client_id, "ADMIN_USERNAME": admin_user, "ADMIN_PASSWORD": admin_password, }, ) create_admin_user_cr = CustomResourceConstruct(self, "CreateAdminUser", props) # DependsOn the user pool create_admin_user_cr.resource.node.add_dependency(user_pool) policy_statement = iam.PolicyStatement() policy_statement.add_actions("cognito-idp:SignUp") policy_statement.add_actions("cognito-idp:AdminConfirmSignUp") policy_statement.add_resources("*") create_admin_user_cr.add_policy_to_role(policy_statement) # IAM roles for identity pool auth/unauth cog_unauth_role = iam.Role( self, "cognitoUnauthRole", role_name=f"Cognito_{identity_pool.identity_pool_name}_Unauth_Role", assumed_by=iam.FederatedPrincipal( "cognito-identity.amazonaws.com", conditions={ "StringEquals": { "cognito-identity.amazonaws.com:aud": identity_pool.ref }, "ForAnyValue:StringLike": { "cognito-identity.amazonaws.com:amr": "unauthenticated" }, }, assume_role_action="sts:AssumeRoleWithWebIdentity", ), ) cog_unauth_role.attach_inline_policy( iam.Policy( self, "cognitoUnauth", policy_name="cognitoUnauth", statements=[ 
iam.PolicyStatement( actions=[ "mobileanalytics:PutEvents", "cognito-sync:*" ], resources=["*"], ) ], )) cog_auth_role = iam.Role( self, "cognitoAuthRole", role_name=f"Cognito_{identity_pool.identity_pool_name}_Auth_Role", managed_policies=[ iam.ManagedPolicy.from_aws_managed_policy_name( "AmazonESCognitoAccess") ], assumed_by=iam.FederatedPrincipal( "cognito-identity.amazonaws.com", conditions={ "StringEquals": { "cognito-identity.amazonaws.com:aud": identity_pool.ref }, "ForAnyValue:StringLike": { "cognito-identity.amazonaws.com:amr": "authenticated" }, }, assume_role_action="sts:AssumeRoleWithWebIdentity", ), ) cog_auth_role.attach_inline_policy( iam.Policy( self, "cognitoAuth", policy_name="cognitoAuth", statements=[ iam.PolicyStatement( actions=[ "mobileanalytics:PutEvents", "cognito-sync:*", "execute-api:*", ], resources=["*"], ), # Provide full access to IoT for the authenticated user # The AWS IoT policy scopes down the access iam.PolicyStatement(actions=["iot:*"], resources=["*"]), ], )) # Finally, attach auth and unauth roles to Identity pool cognito.CfnIdentityPoolRoleAttachment( self, "CDDIdentityPoolRoleAttach", identity_pool_id=identity_pool.ref, roles={ "authenticated": cog_auth_role.role_arn, "unauthenticated": cog_unauth_role.role_arn, }, ) ### Supporting IAM Roles and Policies lambda_full_access_role = iam.Role( # Wide open role for Lambda's to access other services self, "LambdaFullAccessRole", assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"), inline_policies=[ iam.PolicyDocument(statements=[ iam.PolicyStatement( actions=[ "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents", ], resources=["arn:aws:logs:*:*:*"], ), iam.PolicyStatement(actions=["*"], resources=["*"]), ]) ], ) lambda_iot_full_access_role = iam.Role( # Wide open role for Lambda's to access other services self, "LambdaIoTFullAccessRole", assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"), inline_policies=[ iam.PolicyDocument(statements=[ iam.PolicyStatement( 
actions=[ "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents", ], resources=["arn:aws:logs:*:*:*"], ), iam.PolicyStatement(actions=["dynamodb:*", "iot:*"], resources=["*"]), ]) ], ) lambda_api_app_role = iam.Role( # Role for APIG Lambda functions - make specific per Lambda/method if needed self, "ApiAppRole", assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"), inline_policies=[ iam.PolicyDocument(statements=[ iam.PolicyStatement( actions=[ "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents", ], resources=["arn:aws:logs:*:*:*"], ), iam.PolicyStatement( actions=["dynamodb:*"], resources=[ f"arn:aws:dynamodb:{stack.region}:{stack.account}:table/{dispenser_db.table_name}", f"arn:aws:dynamodb:{stack.region}:{stack.account}:table/{dispenser_events.table_name}", f"arn:aws:dynamodb:{stack.region}:{stack.account}:table/{user_db.table_name}", ], ), iam.PolicyStatement(actions=["iot:*"], resources=["*"]), ]) ], ) lambda_api_delete_user_role = iam.Role( # Role for APIG Lambda delete user - specific as this has to delete multiple services self, "ApiDeleteUserRole", assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"), inline_policies=[ iam.PolicyDocument(statements=[ iam.PolicyStatement( actions=[ "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents", ], resources=["arn:aws:logs:*:*:*"], ), iam.PolicyStatement( actions=["dynamodb:*"], resources=[ f"arn:aws:dynamodb:{stack.region}:{stack.account}:table/{dispenser_db.table_name}", f"arn:aws:dynamodb:{stack.region}:{stack.account}:table/{dispenser_events.table_name}", f"arn:aws:dynamodb:{stack.region}:{stack.account}:table/{user_db.table_name}", ], ), iam.PolicyStatement(actions=["cloud9:DeleteEnvironment"], resources=["*"]), iam.PolicyStatement( actions=[ "iam:DeleteLoginProfile", "iam:ListGroupsForUser", "iam:RemoveUserFromGroup", "iam:DeleteUser", ], resources=["*"], ), iam.PolicyStatement( actions=["cognito-idp:AdminDeleteUser"], resources=["*"]), 
iam.PolicyStatement(actions=["iot:*"], resources=["*"]), ]) ], ) lambda_api_dispense_role = iam.Role( # Role for lambda self, "CommandRole", assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"), inline_policies=[ iam.PolicyDocument(statements=[ iam.PolicyStatement( actions=[ "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents", ], resources=["arn:aws:logs:*:*:*"], ), iam.PolicyStatement( actions=["dynamodb:*"], resources=[ f"arn:aws:dynamodb:{stack.region}:{stack.account}:table/{dispenser_db.table_name}", f"arn:aws:dynamodb:{stack.region}:{stack.account}:table/{dispenser_events.table_name}", ], ), iam.PolicyStatement(actions=["iot:*"], resources=["*"]), ]) ], ) # IoT Policies iot_policy_dispenser_limited = iot.CfnPolicy( self, "IoTDispenserLimitedPolicy", policy_name=id + "-DispenserLimitedAccess", policy_document={ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": ["iot:Connect"], "Resource": [ f"arn:aws:iot:{stack.region}:{stack.account}:client/${{iot:Connection.Thing.ThingName}}" ], "Condition": { "Bool": { "iot:Connection.Thing.IsAttached": [True] } }, }, { "Effect": "Allow", "Action": ["iot:Receive"], "Resource": ["*"] }, { "Effect": "Allow", "Action": ["iot:Subscribe"], "Resource": [ f"arn:aws:iot:{stack.region}:{stack.account}:topicfilter/$aws/things/${{iot:Certificate.Subject.CommonName}}/shadow/*", f"arn:aws:iot:{stack.region}:{stack.account}:topicfilter/$aws/things/${{iot:Certificate.Subject.CommonName}}/cmd/${{iot:Certificate.Subject.CommonName}}", ], }, { "Effect": "Allow", "Action": ["iot:Publish"], "Resource": [ f"arn:aws:iot:{stack.region}:{stack.account}:topic/$aws/things/${{iot:Certificate.Subject.CommonName}}/shadow/update", f"arn:aws:iot:{stack.region}:{stack.account}:topic/$aws/things/${{iot:Certificate.Subject.CommonName}}/shadow/get", f"arn:aws:iot:{stack.region}:{stack.account}:topic/test/${{iot:Certificate.Subject.CommonName}}", 
f"arn:aws:iot:{stack.region}:{stack.account}:topic/cmd/${{iot:Certificate.Subject.CommonName}}/response", ], }, ], }, ) iot_policy_client = iot.CfnPolicy( self, "IoTClientPolicy", policy_name=id + "-IoTClientAccess", policy_document={ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": ["iot:Connect", "iot:Receive"], "Resource": ["*"], }, { "Effect": "Allow", "Action": ["iot:Subscribe"], "Resource": [ f"arn:aws:iot:{stack.region}:{stack.account}:topicfilter/events/*", f"arn:aws:iot:{stack.region}:{stack.account}:topicfilter/$aws/things/*/shadow/update/accepted", ], }, ], }, ) ### Lambda Functions # General Lambda Functions NOT associated with APIG lambda_process_events = lambda_.Function( self, "ProcessEvents", function_name=id + "-ProcessEvents", code=lambda_.AssetCode("./lambda_functions/process_events"), handler="process_events.handler", runtime=lambda_.Runtime.PYTHON_3_7, role=lambda_iot_full_access_role, timeout=core.Duration.seconds(20), environment={ "EVENT_TABLE": dispenser_events.table_name, "STATUS_TABLE": dispenser_db.table_name, }, ) ## API Lambda functions # Return credit for dispenser api_credit_dispenser_function = lambda_.Function( self, "ApiCreditDispenserFunction", function_name=id + "-ApiCreditDispenserFunction", code=lambda_.AssetCode("./lambda_functions/api_credit_dispenser"), handler="credit_dispenser.handler", runtime=lambda_.Runtime.PYTHON_3_7, role=lambda_api_app_role, timeout=core.Duration.seconds(15), memory_size=128, environment={ "DISPENSER_TABLE": dispenser_db.table_name, "EVENT_TABLE": dispenser_events.table_name, }, ) # Command api_command_function = lambda_.Function( self, "ApiCommandFunction", function_name=id + "-ApiCommandFunction", code=lambda_.AssetCode("./lambda_functions/api_command"), handler="command.handler", runtime=lambda_.Runtime.PYTHON_3_7, role=lambda_api_app_role, timeout=core.Duration.seconds(15), memory_size=128, environment={ "DispenserTable": dispenser_db.table_name, "EventTable": 
dispenser_events.table_name, }, ) # Request dispense operation (set shadow or command to dispense) api_dispense_function = lambda_.Function( self, "ApiDispenseFunction", function_name=id + "-ApiDispenseFunction", code=lambda_.AssetCode("./lambda_functions/api_dispense"), handler="dispense.handler", runtime=lambda_.Runtime.PYTHON_3_7, role=lambda_api_dispense_role, timeout=core.Duration.seconds(15), memory_size=128, environment={ "DISPENSER_TABLE": dispenser_db.table_name, "EVENT_TABLE": dispenser_events.table_name, }, ) # Request dispense operation (set shadow or command to dispense) api_dispenser_status_function = lambda_.Function( self, "ApiDispenserStatusFunction", function_name=id + "-ApiDispenserStatusFunction", code=lambda_.AssetCode("./lambda_functions/api_dispenser_status"), handler="dispenser_status.handler", runtime=lambda_.Runtime.PYTHON_3_7, role=lambda_api_app_role, timeout=core.Duration.seconds(15), memory_size=128, environment={"DISPENSER_TABLE": dispenser_db.table_name}, ) # Request user details from user table, create resources if needed # NOTE: This uses an overley permissive policy to create the resources needed api_get_resources_function = lambda_.Function( self, "ApiGetResourcesFunction", function_name=id + "-ApiGetResourcesFunction", code=lambda_.AssetCode("./lambda_functions/api_get_resources"), handler="get_resources.handler", runtime=lambda_.Runtime.PYTHON_3_7, role=lambda_full_access_role, # Timeout is for user creation: certain tasks such as Cloud9 may take longer timeout=core.Duration.seconds(300), memory_size=128, environment={ "DISPENSER_TABLE": dispenser_db.table_name, "EVENT_TABLE": dispenser_events.table_name, "USER_TABLE": user_db.table_name, "USER_PERMISSIONS_GROUP": user_group.group_name, "IOT_POLICY_DISPENSER_LIMITED": iot_policy_dispenser_limited.policy_name, "IOT_POLICY_CLIENT": iot_policy_client.policy_name, "CLOUD9_INSTANCE_SIZE": cloud9_instance_size, }, ) # Request user details from user table api_delete_user_function = 
lambda_.Function( self, "ApiDeleteUserFunction", function_name=id + "-ApiDeleteUserFunction", code=lambda_.AssetCode("./lambda_functions/api_delete_user"), handler="delete_user.handler", runtime=lambda_.Runtime.PYTHON_3_7, role=lambda_api_delete_user_role, timeout=core.Duration.seconds(28), memory_size=256, environment={ "DISPENSER_TABLE": dispenser_db.table_name, "EVENT_TABLE": dispenser_events.table_name, "USER_TABLE": user_db.table_name, "USER_POOL_ID": user_pool.user_pool_id, }, ) ### API Gateway api = apigateway.RestApi( self, id + "-API", api_key_source_type=apigateway.ApiKeySourceType.HEADER, deploy_options=apigateway.StageOptions(stage_name="prod"), ) core.CfnOutput( self, "APIEndpoint", export_name="APIEndpoint", value= f"https://{api.rest_api_id}.execute-api.{stack.region}.amazonaws.com/prod/", ) # Although / is not used as method, provide OPTIONS for hinting CORS add_cors_options(api.root) # Define Cognito authorizer and attach to gateway cog_authorizer = apigateway.CfnAuthorizer( self, "CognitoAuth", name="CognitoAuthName", rest_api_id=api.rest_api_id, type="COGNITO_USER_POOLS", identity_source="method.request.header.Authorization", provider_arns=[user_pool.user_pool_arn], ) # # Resources (paths) and methods (GET, POST, etc.), for the API api_credit_resource = api.root.add_resource("credit") add_resource_method( api_credit_resource, http_method="GET", integration=apigateway.LambdaIntegration( api_credit_dispenser_function), authorization_type=apigateway.AuthorizationType.COGNITO, authorizer=cog_authorizer, ) add_cors_options(api_credit_resource) # command api_command_resource = api.root.add_resource("command") add_resource_method( api_command_resource, http_method="GET", integration=apigateway.LambdaIntegration(api_command_function), authorization_type=apigateway.AuthorizationType.COGNITO, authorizer=cog_authorizer, ) add_cors_options(api_command_resource) # Actuate dispenser api_dispense_resource = api.root.add_resource("dispense") add_resource_method( 
api_dispense_resource, http_method="GET", integration=apigateway.LambdaIntegration(api_dispense_function), authorization_type=apigateway.AuthorizationType.COGNITO, authorizer=cog_authorizer, ) add_cors_options(api_dispense_resource) # Return dispenser status (from DynamoDB) api_dispenser_status_resource = api.root.add_resource("status") add_resource_method( api_dispenser_status_resource, http_method="GET", integration=apigateway.LambdaIntegration( api_dispenser_status_function), authorization_type=apigateway.AuthorizationType.COGNITO, authorizer=cog_authorizer, ) add_cors_options(api_dispenser_status_resource) # Return user details from User Table api_get_resources_resource = api.root.add_resource("getResources") add_resource_method( api_get_resources_resource, http_method="POST", integration=apigateway.LambdaIntegration( api_get_resources_function), authorization_type=apigateway.AuthorizationType.COGNITO, authorizer=cog_authorizer, ) add_cors_options(api_get_resources_resource) # Create a user based on valid token api_delete_user_resource = api.root.add_resource("deleteUser") add_resource_method( api_delete_user_resource, http_method="POST", integration=apigateway.LambdaIntegration(api_delete_user_function), authorization_type=apigateway.AuthorizationType.COGNITO, authorizer=cog_authorizer, ) add_cors_options(api_delete_user_resource) # Create policy and reference group iam.Policy( self, "UserPermissionsPolicy", groups=[user_group], policy_name=id + "-UserPermissions", statements=[ # Elevated permissions beyond the ReadOnlyUser # Allow seeing all MQTT messages iam.PolicyStatement( actions=["iot:Subscribe", "iot:Connect", "iot:Receive"], resources=["*"], ), # Allow search indexing iam.PolicyStatement( actions=["iot:SearchIndex"], resources=[ f"arn:aws:iot:{stack.region}:{stack.account}:index/AWS_Things" ], ), # Allow changing of security group ingress on EC2 (Cloud9) to support mapping 443 to iam.PolicyStatement( actions=[ "ec2:AuthorizeSecurityGroupIngress", 
"ec2:RevokeSecurityGroupIngress", ], resources=[ f"arn:aws:ec2:{stack.region}:{stack.account}:security-group/*" ], ), # DENY access to credentials table iam.PolicyStatement( effect=iam.Effect.DENY, actions=["dynamodb:*"], resources=[ f"arn:aws:dynamodb:{stack.region}:{stack.account}:table/{user_db.table_name}" ], ), # DENY access to S3 overall iam.PolicyStatement(effect=iam.Effect.DENY, actions=["s3:*"], resources=["*"]), ], ) # IoT Constructs # Rule to process shadow events and send to logging iot_rule_log_shadow_events = iot.CfnTopicRule( self, "LogShadowEventsRule", rule_name=id.replace("-", "") + "_LogShadowEvents", topic_rule_payload=iot.CfnTopicRule.TopicRulePayloadProperty( description= "Based on shadow topic and content, process messages via Lambda", rule_disabled=False, aws_iot_sql_version="2016-03-23", sql= "select *, topic() AS topic FROM '$aws/things/+/shadow/update/documents'", actions=[ iot.CfnTopicRule.ActionProperty( lambda_=iot.CfnTopicRule.LambdaActionProperty( function_arn=lambda_process_events.function_arn)) ], ), ) # Allow rule to invoke the logging function lambda_process_events.add_permission( "AllowIoTRule1", principal=iam.ServicePrincipal("iot.amazonaws.com"), source_arn=iot_rule_log_shadow_events.attr_arn, ) # Rule to process generic events and send to logging iot_rule_log_generic_events = iot.CfnTopicRule( self, "LogGenericEventsRule", rule_name=id.replace("-", "") + "_LogGenericEvents", topic_rule_payload=iot.CfnTopicRule.TopicRulePayloadProperty( description="Log generic events, enrich, then send to Lambda", rule_disabled=False, aws_iot_sql_version="2016-03-23", sql= "select *, timestamp() AS ts, topic() AS topic FROM 'events'", actions=[ iot.CfnTopicRule.ActionProperty( lambda_=iot.CfnTopicRule.LambdaActionProperty( function_arn=lambda_process_events.function_arn)) ], ), ) # Allow generic_events rule to Invoke the process_events function lambda_process_events.add_permission( "AllowIoTRule2", 
principal=iam.ServicePrincipal("iot.amazonaws.com"), source_arn=iot_rule_log_generic_events.attr_arn, ) # Rule to process dispenser specific events and send to logging iot_rule_log_dispenser_events = iot.CfnTopicRule( self, "LogDispenserEventsRule", rule_name=id.replace("-", "") + "_LogDispenserEvents", topic_rule_payload=iot.CfnTopicRule.TopicRulePayloadProperty( description= "Log specific dispenser events, enrich, then send to Lambda", rule_disabled=False, aws_iot_sql_version="2016-03-23", sql= "select *, timestamp() AS ts, topic() AS topic FROM 'events/+'", actions=[ iot.CfnTopicRule.ActionProperty( lambda_=iot.CfnTopicRule.LambdaActionProperty( function_arn=lambda_process_events.function_arn)) ], ), ) # Allow log_dispenser_events rule to Invoke the process_events function lambda_process_events.add_permission( "AllowIoTRule3", principal=iam.ServicePrincipal("iot.amazonaws.com"), source_arn=iot_rule_log_dispenser_events.attr_arn, ) # Rule to process cmd/NNN/response WHERE "command=dispense" iot_rule_command_response_dispense = iot.CfnTopicRule( self, "DispenseCommandResponseRule", rule_name=id.replace("-", "") + "_DispenseCommandResponse", topic_rule_payload=iot.CfnTopicRule.TopicRulePayloadProperty( description= "Invoke Lambda to process dispense commands from dispenser", rule_disabled=False, aws_iot_sql_version="2016-03-23", sql= "select *, topic() AS topic FROM '$aws/things/+/shadow/update/accepted' WHERE isUndefined(state.reported.response) = False", actions=[ iot.CfnTopicRule.ActionProperty( lambda_=iot.CfnTopicRule.LambdaActionProperty( function_arn=api_dispense_function.function_arn)) ], ), ) # Allow command_response rule to Invoke the dispense function to reconcile outstanding requests api_dispense_function.add_permission( "AllowIoTCommandResponseRule", principal=iam.ServicePrincipal("iot.amazonaws.com"), source_arn=iot_rule_command_response_dispense.attr_arn, ) # Custom resource to delete workshop users - run to clean up any lingering ones # if the admin 
user didn't clean up. A lot of dependsOn as users are created with bindings # to other resources props: CustomResourceProps = CustomResourceProps( name=id + "-CR-DeleteParticipantUsers", lambda_directory="./lambda_functions/cr_delete_participant_users", handler="index.main", timeout=30, runtime=lambda_.Runtime.PYTHON_3_7, environment={ # Read user records from UserTable "USER_TABLE": user_db.table_name, # Invoke the api_delete_user function "DELETE_USER_LAMBDA_FUNCTION": api_delete_user_function.function_arn, }, ) delete_participant_users_cr = CustomResourceConstruct( self, "DeleteParticpantUsers", props) # DependsOn the API Delete User Function delete_participant_users_cr.resource.node.add_dependency( api_delete_user_function) # DependsOn the user pool to delete Cognito users delete_participant_users_cr.resource.node.add_dependency(user_pool) # DependsOn the DynamoDB UserTable delete_participant_users_cr.resource.node.add_dependency(user_db) # DependsOn the IoT dispenser and client policies delete_participant_users_cr.resource.node.add_dependency( iot_policy_dispenser_limited) delete_participant_users_cr.resource.node.add_dependency( iot_policy_client) # DependsOn the IoT IAM user group delete_participant_users_cr.resource.node.add_dependency(user_group) # Permissions for function to delete users policy_statement = iam.PolicyStatement() policy_statement.add_actions("dynamodb:*") policy_statement.add_resources( f"arn:aws:dynamodb:{stack.region}:{stack.account}:table/{user_db.table_name}" ) delete_participant_users_cr.add_policy_to_role(policy_statement) policy_statement = iam.PolicyStatement() policy_statement.add_actions("lambda:InvokeFunction") policy_statement.add_resources(api_delete_user_function.function_arn) delete_participant_users_cr.add_policy_to_role(policy_statement)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Incident-response stack for write activity on the socialistir-prod bucket.

    Provisions:
      * an ALLOW and a mirror-image DENY managed policy over the bucket's
        write/configuration actions (the DENY one can be attached to revoke
        access during an incident),
      * two developer IAM groups (only the first gets write access),
      * a CloudTrail trail + EventBridge rule watching S3 write API calls,
      * an SNS notification topic, and
      * a responder Lambda triggered by the rule.
    """
    super().__init__(scope, id, **kwargs)

    # Managed policy granting write/configuration access to the
    # socialistir-prod bucket and all objects in it.
    custom_allow_policy = iam.ManagedPolicy(
        self,
        "socialistir-custom-shub-write",
        managed_policy_name="socialistir-custom-shub-write",
        statements=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "s3:PutAnalyticsConfiguration",
                    "s3:PutAccelerateConfiguration",
                    "s3:DeleteObjectVersion",
                    "s3:RestoreObject",
                    "s3:CreateBucket",
                    "s3:ReplicateObject",
                    "s3:PutEncryptionConfiguration",
                    "s3:DeleteBucketWebsite",
                    "s3:AbortMultipartUpload",
                    "s3:PutLifecycleConfiguration",
                    "s3:DeleteObject",
                    "s3:DeleteBucket",
                    "s3:PutBucketVersioning",
                    "s3:PutMetricsConfiguration",
                    "s3:PutReplicationConfiguration",
                    "s3:PutObjectLegalHold",
                    "s3:PutBucketCORS",
                    "s3:PutInventoryConfiguration",
                    "s3:PutObject",
                    "s3:PutBucketNotification",
                    "s3:PutBucketWebsite",
                    "s3:PutBucketRequestPayment",
                    "s3:PutObjectRetention",
                    "s3:PutBucketLogging",
                    "s3:PutBucketObjectLockConfiguration",
                    "s3:ReplicateDelete"
                ],
                resources=[
                    "arn:aws:s3:::socialistir-prod",
                    "arn:aws:s3:::socialistir-prod/*"
                ])
        ])

    # DENY counterpart covering the same write actions; an explicit DENY
    # overrides any ALLOW when attached to a principal.
    custom_deny_policy = iam.ManagedPolicy(
        self,
        "S3-Custom-Shub-Deny_Write",
        managed_policy_name="S3-Custom-Shub-Deny_Write",
        statements=[
            iam.PolicyStatement(
                effect=iam.Effect.DENY,
                actions=[
                    "s3:PutAnalyticsConfiguration",
                    "s3:PutAccelerateConfiguration",
                    "s3:PutMetricsConfiguration",
                    "s3:PutReplicationConfiguration",
                    "s3:CreateBucket",
                    "s3:PutBucketCORS",
                    "s3:PutInventoryConfiguration",
                    "s3:PutEncryptionConfiguration",
                    "s3:PutBucketNotification",
                    "s3:DeleteBucketWebsite",
                    "s3:PutBucketWebsite",
                    "s3:PutBucketRequestPayment",
                    "s3:PutBucketLogging",
                    "s3:PutLifecycleConfiguration",
                    "s3:PutBucketObjectLockConfiguration",
                    "s3:DeleteBucket",
                    "s3:PutBucketVersioning",
                    "s3:ReplicateObject",
                    "s3:PutObject",
                    "s3:AbortMultipartUpload",
                    "s3:PutObjectRetention",
                    "s3:DeleteObjectVersion",
                    "s3:RestoreObject",
                    "s3:PutObjectLegalHold",
                    "s3:DeleteObject",
                    "s3:ReplicateDelete"
                ],
                resources=[
                    "arn:aws:s3:::socialistir-prod",
                    "arn:aws:s3:::socialistir-prod/*"
                ])
        ])

    # Developer groups: only Developer-socialistir starts with write access.
    devgroup1 = iam.Group(self, "Developer-socialistir",
                          group_name="Developer-socialistir",
                          managed_policies=[custom_allow_policy])
    devgroup2 = iam.Group(self, "Developer-teamA",
                          group_name="Developer-teamA")

    # NOTE: The bucket below should be created only once: AWS CDK does not
    # support destruction of S3 buckets yet and will throw errors on
    # subsequent deploy, destroy, or rollback. S3 bucket names are global
    # across all AWS accounts.
    # bucket = s3.Bucket(self, id='socialistir-prod', bucket_name='socialistir-prod', versioned=True, website_error_document='index.html', website_index_document='index.html')

    # CloudTrail trail capturing write-only data events for the bucket so
    # that object-level API calls reach EventBridge.
    trail = cloudtrail.Trail(self, "S3-Write-Operation-Trail")
    trail.add_s3_event_selector(
        ["arn:aws:s3:::socialistir-prod/"],
        include_management_events=True,
        read_write_type=cloudtrail.ReadWriteType.WRITE_ONLY)

    # SNS topic for e-mail notification of write activity.
    topic = sns.Topic(self, "S3-Notification-Write",
                      topic_name="S3-Notification-Write")
    topic.add_subscription(subs.EmailSubscription('*****@*****.**'))

    # EventBridge pattern matching S3 object-level API calls (as recorded
    # by CloudTrail) against the monitored bucket.
    ep = {
        "source": ["aws.s3"],
        "detail": {
            "eventSource": ["s3.amazonaws.com"],
            "eventName": [
                "ListObjects", "ListObjectVersions", "PutObject",
                "GetObject", "HeadObject", "CopyObject", "GetObjectAcl",
                "PutObjectAcl", "CreateMultipartUpload", "ListParts",
                "UploadPart", "CompleteMultipartUpload",
                "AbortMultipartUpload", "UploadPartCopy", "RestoreObject",
                "DeleteObject", "DeleteObjects", "GetObjectTorrent",
                "SelectObjectContent", "PutObjectLockRetention",
                "PutObjectLockLegalHold", "GetObjectLockRetention",
                "GetObjectLockLegalHold"
            ],
            "requestParameters": {
                "bucketName": ["socialistir-prod"]
            }
        }
    }
    rule = events.Rule(self, "Shub-s3",
                       description='Rule created by CDK for S3 monitoring',
                       enabled=True,
                       rule_name="Shub-s3",
                       event_pattern=ep)

    # Incident-response Lambda invoked by the rule above.
    lambda_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks", "in_s3_01")
    response_lambda = _lambda.Function(
        self,
        "S3WriteIR",
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler="lambda_function.lambda_handler",
        code=_lambda.Code.from_asset(lambda_dir_path),
        function_name="S3WriteIR")

    # IAM + Organizations read/modify permissions so the responder can
    # inspect and adjust IAM state during an incident.
    # (Previously this identical statement was added twice; once suffices.)
    response_lambda.add_to_role_policy(
        iam.PolicyStatement(
            actions=[
                "iam:*",
                "organizations:DescribeAccount",
                "organizations:DescribeOrganization",
                "organizations:DescribeOrganizationalUnit",
                "organizations:DescribePolicy",
                "organizations:ListChildren",
                "organizations:ListParents",
                "organizations:ListPoliciesForTarget",
                "organizations:ListRoots",
                "organizations:ListPolicies",
                "organizations:ListTargetsForPolicy"
            ],
            effect=iam.Effect.ALLOW,
            resources=["*"],
        ))
    response_lambda.add_to_role_policy(
        iam.PolicyStatement(
            actions=["s3:*"],
            effect=iam.Effect.ALLOW,
            resources=["*"],
        ))
    response_lambda.add_to_role_policy(
        iam.PolicyStatement(
            actions=["sns:*"],
            effect=iam.Effect.ALLOW,
            resources=["*"],
        ))
    rule.add_target(event_target.LambdaFunction(response_lambda))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """ABAC (attribute-based access control) demo stack.

    Creates a private versioned bucket, three tagged IAM users in one
    group, three tagged IAM roles, and tag-matching policies so that
    users may only assume roles (and touch objects) whose teamName /
    projectName tags match their own principal tags.
    """
    super().__init__(scope, id, **kwargs)

    # Private versioned bucket used to demonstrate tag-based object access.
    pvt_bkt = s3.Bucket(
        self,
        "abacBucket",
        versioned=True,
        # encryption=s3.BucketEncryption.KMS_MANAGED,
        block_public_access=s3.BlockPublicAccess(block_public_policy=True),
        removal_policy=core.RemovalPolicy.DESTROY
    )
    # Bucket policy: account root keeps full object access.
    pvt_bkt.add_to_resource_policy(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            # actions=["s3:GetObject"],
            actions=["s3:*"],
            # resources=[pvt_bkt.arn_for_objects("file.txt")],
            resources=[pvt_bkt.arn_for_objects("*")],
            principals=[iam.AccountRootPrincipal()]
        )
    )

    # Create 3 users (1 admin & 2 normal users), each with a generated
    # password coming from the random_string_generator custom resource.
    redRosy_new_pass = random_string_generator(
        self,
        "redRosyNewPasswordGenerator",
        Length=20
    )
    redRosy = iam.User(
        self,
        "redRosy",
        user_name="redRosy",
        password=core.SecretValue.plain_text(redRosy_new_pass.response)
    )
    blueBob_new_pass = random_string_generator(
        self,
        "blueBobNewPasswordGenerator",
        Length=20
    )
    blueBob = iam.User(
        self,
        "blueBob",
        user_name="blueBob",
        password=core.SecretValue.plain_text(blueBob_new_pass.response)
    )
    annoyingAdmin_new_pass = random_string_generator(
        self,
        "annoyingAdminNewPasswordGenerator",
        Length=20
    )
    annoyingAdmin = iam.User(
        self,
        "annoyingAdmin",
        user_name="annoyingAdmin",
        password=core.SecretValue.plain_text(annoyingAdmin_new_pass.response)
    )

    teamUnicornGrp = iam.Group(
        self,
        "teamUnicorn",
        group_name="teamUnicorn"
    )

    # Add users to the group.
    teamUnicornGrp.add_user(redRosy)
    teamUnicornGrp.add_user(blueBob)
    teamUnicornGrp.add_user(annoyingAdmin)
    # blueGrp1.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("AmazonS3ReadOnlyAccess"))

    ##############################################
    # We need a custom resource to TAG IAM Users #
    ##############################################
    iamUserTaggerResp = iam_user_tagger(
        self, "iamTagger",
        message=[
            {"user": redRosy.user_name,
             "tags": [{'Key': 'teamName', 'Value': 'teamUnicorn'},
                      {'Key': 'projectName', 'Value': 'projectRed'}
                      ]
             },
            {"user": blueBob.user_name,
             "tags": [{'Key': 'teamName', 'Value': 'teamUnicorn'},
                      {'Key': 'projectName', 'Value': 'projectBlue'}
                      ]
             },
            {"user": annoyingAdmin.user_name,
             "tags": [{'Key': 'teamName', 'Value': 'teamUnicorn'},
                      {'Key': 'teamAdmin', 'Value': 'yes'}
                      ]
             }
        ]
    )

    # Create the IAM roles. Users belonging to the group will be able to
    # assume a role only when their principal tags match the role's tags.
    accountId = core.Aws.ACCOUNT_ID
    teamUnicornProjectRedRole = iam.Role(
        self,
        'teamUnicornProjectRedRoleId',
        assumed_by=iam.AccountPrincipal(f"{accountId}"),
        role_name="teamUnicornProjectRedRole"
    )
    core.Tag.add(teamUnicornProjectRedRole, key="teamName", value="teamUnicorn")
    core.Tag.add(teamUnicornProjectRedRole, key="projectName", value="projectRed")

    teamUnicornProjectBlueRole = iam.Role(
        self,
        'teamUnicornProjectBlueRoleId',
        assumed_by=iam.AccountPrincipal(f"{accountId}"),
        role_name="teamUnicornProjectBlueRole"
    )
    core.Tag.add(teamUnicornProjectBlueRole, key="teamName", value="teamUnicorn")
    core.Tag.add(teamUnicornProjectBlueRole, key="projectName", value="projectBlue")

    teamUnicornTeamAdminRole = iam.Role(
        self,
        'teamUnicornTeamAdminRoleId',
        assumed_by=iam.AccountPrincipal(f"{accountId}"),
        role_name="teamUnicornTeamAdminRole"
    )
    core.Tag.add(teamUnicornTeamAdminRole, key="teamName", value="teamUnicorn")
    core.Tag.add(teamUnicornTeamAdminRole, key="teamAdmin", value="yes")

    # Allow group members to assume project roles whose tags match theirs.
    grpStmt1 = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=[f"arn:aws:iam::{accountId}:role/teamUnicornProject*"],
        actions=["sts:AssumeRole"],
        conditions={
            "StringEquals": {
                "iam:ResourceTag/teamName": "${aws:PrincipalTag/teamName}",
                "iam:ResourceTag/projectName": "${aws:PrincipalTag/projectName}"
            }
        }
    )
    grpStmt1.sid = "AllowGroupMembersToAssumeRoleMatchingTeamName"
    # Allow the team admin to assume the admin role of their own team.
    grpStmt2 = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=[f"arn:aws:iam::{accountId}:role/teamUnicornTeamAdminRole"],
        actions=["sts:AssumeRole"],
        conditions={
            "StringEquals": {
                "iam:ResourceTag/teamName": "${aws:PrincipalTag/teamName}",
                "iam:ResourceTag/teamAdmin": "yes"
            }
        }
    )
    grpStmt2.sid = "AllowTeamAdminToAssumeRoleMatchingTeamName"
    teamUnicornGrp.add_to_policy(grpStmt1)
    teamUnicornGrp.add_to_policy(grpStmt2)

    # Permissions shared by all three roles (attached below).
    roleStmt1 = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=["*"],
        actions=["s3:ListAllMyBuckets", "s3:HeadBucket"]
    )
    roleStmt1.sid = "AllowGroupToSeeBucketListInTheConsole"
    roleStmt2 = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=[pvt_bkt.bucket_arn],
        actions=["s3:ListBucket", "s3:ListBucketVersions"],
        # Below condition can be used to enable listing a particular prefix in another statement
        # conditions={ "StringEquals" : { "s3:prefix":[""], "s3:delimiter":["/"] } }
    )
    roleStmt2.sid = "AllowRootLevelListingOfBucket"
    # Read access only to objects whose tags match the caller's tags.
    roleStmt3 = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=[pvt_bkt.arn_for_objects("*")],
        actions=["s3:Get*", "s3:DeleteObjectTagging"],
        conditions={
            "StringEquals": {
                "s3:ExistingObjectTag/teamName": "${aws:PrincipalTag/teamName}",
                "s3:ExistingObjectTag/projectName": "${aws:PrincipalTag/projectName}"
            }
        }
    )
    roleStmt3.sid = "ReadOnlyAccessToTeams"
    # Write access only when the uploaded object is tagged like the caller.
    roleStmt4 = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=[pvt_bkt.arn_for_objects("*")],
        actions=["s3:PutObject", "s3:PutObjectTagging", "s3:PutObjectVersionTagging"],
        conditions={
            "StringEquals": {
                "s3:RequestObjectTag/teamName": "${aws:PrincipalTag/teamName}",
                "s3:RequestObjectTag/projectName": "${aws:PrincipalTag/projectName}"
            }
        }
    )
    roleStmt4.sid = "WriteTaggedObjectOwnedByThem"
    # Full access for team admins. NOTE: the condition *key* must be the
    # plain condition key; the previous "${aws:PrincipalTag/teamAdmin}"
    # form is policy-variable syntax (valid only in values/ARNs) and
    # could never match.
    roleStmt5 = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=[pvt_bkt.bucket_arn, pvt_bkt.arn_for_objects("*")],
        actions=["s3:*"],
        conditions={
            "StringEquals": {
                "aws:PrincipalTag/teamAdmin": ["yes"]
            }
        }
    )
    roleStmt5.sid = "FullAccessToAdminsFromSameTeam"

    teamUnicornProjectRedRole.add_to_policy(roleStmt1)
    teamUnicornProjectRedRole.add_to_policy(roleStmt2)
    teamUnicornProjectRedRole.add_to_policy(roleStmt3)
    teamUnicornProjectRedRole.add_to_policy(roleStmt4)
    teamUnicornProjectRedRole.add_to_policy(roleStmt5)

    # Same permissions for projectBlueRole.
    teamUnicornProjectBlueRole.add_to_policy(roleStmt1)
    teamUnicornProjectBlueRole.add_to_policy(roleStmt2)
    teamUnicornProjectBlueRole.add_to_policy(roleStmt3)
    teamUnicornProjectBlueRole.add_to_policy(roleStmt4)
    teamUnicornProjectBlueRole.add_to_policy(roleStmt5)

    # Same permissions for teamAdminRole.
    teamUnicornTeamAdminRole.add_to_policy(roleStmt1)
    teamUnicornTeamAdminRole.add_to_policy(roleStmt2)
    teamUnicornTeamAdminRole.add_to_policy(roleStmt3)
    teamUnicornTeamAdminRole.add_to_policy(roleStmt4)
    teamUnicornTeamAdminRole.add_to_policy(roleStmt5)

    ###########################################
    ################# OUTPUTS #################
    ###########################################
    output0 = core.CfnOutput(self, "SecuirtyAutomationFrom",
                             value=f"{global_args.SOURCE_INFO}",
                             description="To know more about this automation stack, check out our github page."
                             )
    output1_r = core.CfnOutput(self, "User:redRosy",
                               value=redRosy_new_pass.response,
                               description="Red Rosy User Password"
                               )
    output1_b = core.CfnOutput(self, "User:blueBob",
                               value=blueBob_new_pass.response,
                               description="Blue Bob User Password"
                               )
    output1_a = core.CfnOutput(self, "User:annoyingAdmin",
                               value=annoyingAdmin_new_pass.response,
                               description="Annoying Admin User Password"
                               )
    output2 = core.CfnOutput(self, "SecurePrivateBucket",
                             value=(
                                 f"https://console.aws.amazon.com/s3/buckets/"
                                 f"{pvt_bkt.bucket_name}"
                             ),
                             description="S3 Bucket to Test ABAC"
                             )
    output3 = core.CfnOutput(self, "Rosy-Assume-RedRole-Url",
                             value=(
                                 f"https://signin.aws.amazon.com/switchrole?roleName="
                                 f"{teamUnicornProjectRedRole.role_name}"
                                 f"&account="
                                 f"{core.Aws.ACCOUNT_ID}"
                             ),
                             description="The URL for Rosy to assume teamRed Role"
                             )
    # NOTE(review): the logical id below says "RedRole" but the value is the
    # Blue role URL; renaming the output would change the stack interface,
    # so it is kept as-is.
    output4 = core.CfnOutput(self, "blueBob-Assume-RedRole-Url",
                             value=(
                                 f"https://signin.aws.amazon.com/switchrole?roleName="
                                 f"{teamUnicornProjectBlueRole.role_name}"
                                 f"&account="
                                 f"{core.Aws.ACCOUNT_ID}"
                             ),
                             description="The URL for Bob to assume teamBlue Role"
                             )
    output5 = core.CfnOutput(self, "SampleS3UploadCommands",
                             value=(
                                 f"aws s3api put-object-tagging --bucket {pvt_bkt.bucket_name} --key YOUR-OBJECT --tagging 'TagSet=[{{Key=projectName,Value=teamRed}}]'"
                             ),
                             description="For ProjectRed"
                             )
    output10 = core.CfnOutput(self, "User-Login-Url",
                              value=(
                                  f"https://{core.Aws.ACCOUNT_ID}.signin.aws.amazon.com/console"
                              ),
                              description="The AWS console sign-in URL for the demo users"
                              )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """ABAC demo stack for EC2.

    Creates one tagged user/group, a tag-matched assumable role with
    tag-restricted EC2 permissions, a small public VPC, and two tagged
    test instances (projectRed / projectBlue).
    """
    super().__init__(scope, id, **kwargs)

    # Generate a password for our user via the custom resource.
    shiny_new_pass = random_string_generator(self,
                                             "shinyNewPasswordGenerator",
                                             Length=20)
    # Create a user belonging to project Red.
    projectRedUser1redRosy = iam.User(self,
                                      "projectRedUser1redRosy",
                                      user_name="redRosy",
                                      password=core.SecretValue.plain_text(
                                          shiny_new_pass.response))
    teamUnicornGrp = iam.Group(self,
                               "teamUnicornGrp",
                               group_name="teamUnicorn")
    # Add users to the group.
    teamUnicornGrp.add_user(projectRedUser1redRosy)
    # blueGrp1.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("AmazonS3ReadOnlyAccess"))

    ##############################################
    # We need a custom resource to TAG IAM Users #
    ##############################################
    iamUserTaggerResp = iam_user_tagger(
        self,
        "iamTagger",
        message=[{
            "user": projectRedUser1redRosy.user_name,
            "tags": [{
                'Key': 'teamName',
                'Value': 'teamUnicorn'
            }, {
                'Key': 'projectName',
                'Value': 'projectRed'
            }]
        }])

    # Create the IAM role. Users belonging to this group will be able to
    # assume this role based on tag validation.
    accountId = core.Aws.ACCOUNT_ID
    teamUnicornProjectRedRole = iam.Role(
        self,
        'unicornTeamProjectRedRoleId',
        assumed_by=iam.AccountPrincipal(f"{accountId}"),
        role_name="teamUnicornProjectRedRole")
    core.Tag.add(teamUnicornProjectRedRole,
                 key="teamName",
                 value="teamUnicorn")
    core.Tag.add(teamUnicornProjectRedRole,
                 key="projectName",
                 value="projectRed")

    # Allow the group to assume roles following the naming convention
    # <TEAM-NAME><PROJECT-NAME>Role (e.g. teamUnicornProjectRedRole),
    # but only when the role's tags match the caller's principal tags.
    grpStmt1 = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=[f"arn:aws:iam::{accountId}:role/teamUnicornProject*"],
        actions=["sts:AssumeRole"],
        conditions={
            "StringEquals": {
                "iam:ResourceTag/teamName": "${aws:PrincipalTag/teamName}",
                "iam:ResourceTag/projectName": "${aws:PrincipalTag/projectName}"
            }
        })
    grpStmt1.sid = "AllowGroupMembersToAssumeRoleMatchingTeamName"
    # Attach the policy to the group.
    teamUnicornGrp.add_to_policy(grpStmt1)

    # Add permissions to the role.
    roleStmt0 = iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    resources=["*"],
                                    actions=[
                                        "ec2:Describe*",
                                        "cloudwatch:Describe*",
                                        "cloudwatch:Get*",
                                    ])
    roleStmt0.sid = "AllowUserToDescribeInstances"
    teamUnicornProjectRedRole.add_to_policy(roleStmt0)

    # Launch-time resources that do not carry the team/project tags.
    roleStmt1a = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=[
            "arn:aws:ec2:*::image/*", "arn:aws:ec2:*::snapshot/*",
            "arn:aws:ec2:*:*:subnet/*",
            "arn:aws:ec2:*:*:network-interface/*",
            "arn:aws:ec2:*:*:security-group/*",
            "arn:aws:ec2:*:*:key-pair/*"
        ],
        actions=["ec2:RunInstances"])
    roleStmt1a.sid = "AllowRunInstances"
    teamUnicornProjectRedRole.add_to_policy(roleStmt1a)

    # Instances/volumes may only be created when tagged like the caller.
    roleStmt1b = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=[
            "arn:aws:ec2:*:*:instance/*",
            "arn:aws:ec2:*:*:volume/*",
        ],
        actions=["ec2:CreateVolume", "ec2:RunInstances"],
        conditions={
            "StringEquals": {
                "aws:RequestTag/teamName": "${aws:PrincipalTag/teamName}",
                "aws:RequestTag/projectName": "${aws:PrincipalTag/projectName}"
            },
            "ForAllValues:StringEquals": {
                "aws:TagKeys": ["teamName", "projectName"]
            }
        })
    roleStmt1b.sid = "AllowRunInstancesWithRestrictionsRequiredTags"
    teamUnicornProjectRedRole.add_to_policy(roleStmt1b)

    # BUGFIX: the original conditions dict declared "StringEquals" twice;
    # Python keeps only the last duplicate key, so the tag-matching
    # requirement was silently dropped. All StringEquals entries must live
    # in one map.
    roleStmt2 = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=[
            "arn:aws:ec2:*:*:instance/*", "arn:aws:ec2:*:*:volume/*"
        ],
        actions=["ec2:CreateTags"],
        conditions={
            "StringEquals": {
                "aws:RequestTag/teamName": "${aws:PrincipalTag/teamName}",
                "aws:RequestTag/projectName": "${aws:PrincipalTag/projectName}",
                "ec2:CreateAction": "RunInstances"
            },
            "ForAllValues:StringEquals": {
                "aws:TagKeys": ["projectName", "teamName"]
            }
        })
    roleStmt2.sid = "AllowCreateTagsIfRequestingValidTags"
    teamUnicornProjectRedRole.add_to_policy(roleStmt2)

    # Lifecycle operations only on resources tagged like the caller.
    roleStmt3 = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=[
            "arn:aws:ec2:*:*:instance/*", "arn:aws:ec2:*:*:volume/*"
        ],
        actions=[
            "ec2:RebootInstances", "ec2:TerminateInstances",
            "ec2:StartInstances", "ec2:StopInstances"
        ],
        conditions={
            "StringEquals": {
                "ec2:ResourceTag/teamName": "${aws:PrincipalTag/teamName}",
                "ec2:ResourceTag/projectName": "${aws:PrincipalTag/projectName}"
            }
        })
    roleStmt3.sid = "AllowInstanceManagementIfTagsMatch"
    teamUnicornProjectRedRole.add_to_policy(roleStmt3)

    # Create a couple of instances to test with: a small public-only VPC.
    vpc = ec2.Vpc(self,
                  "abacVPC",
                  cidr="10.13.0.0/21",
                  max_azs=2,
                  nat_gateways=0,
                  subnet_configuration=[
                      ec2.SubnetConfiguration(
                          name="pubSubnet",
                          cidr_mask=24,
                          subnet_type=ec2.SubnetType.PUBLIC)
                  ])
    # Tag all VPC resources.
    core.Tag.add(vpc,
                 key="Owner",
                 value="KonStone",
                 include_resource_types=[])
    core.Tag.add(vpc,
                 key="teamName",
                 value="teamUnicorn",
                 include_resource_types=[])

    # We are using the latest AMAZON LINUX AMI.
    ami_id = ec2.AmazonLinuxImage(generation=ec2.AmazonLinuxGeneration.
                                  AMAZON_LINUX_2).get_image(self).image_id

    red_web_inst = ec2.CfnInstance(
        self,
        "redWebInstance01",
        image_id=ami_id,
        instance_type="t2.micro",
        monitoring=False,
        tags=[{
            "key": "teamName",
            "value": "teamUnicorn"
        }, {
            "key": "projectName",
            "value": "projectRed"
        }, {
            "key": "Name",
            "value": "projectRed-Web"
        }],
        network_interfaces=[{
            "deviceIndex": "0",
            "associatePublicIpAddress": True,
            "subnetId": vpc.public_subnets[0].subnet_id,
            # "groupSet": [web_sg.security_group_id]
        }],  # https://github.com/aws/aws-cdk/issues/3419
    )
    # core.Tag.add(red_web_inst,key="Owner",value="KonStone",include_resource_types=[])

    blue_web_inst = ec2.CfnInstance(
        self,
        "blueWebInstance01",
        image_id=ami_id,
        instance_type="t2.micro",
        monitoring=False,
        tags=[{
            "key": "teamName",
            "value": "teamUnicorn"
        }, {
            "key": "projectName",
            "value": "projectBlue"
        }, {
            "key": "Name",
            "value": "projectBlue-Web"
        }],
        network_interfaces=[{
            "deviceIndex": "0",
            "associatePublicIpAddress": True,
            "subnetId": vpc.public_subnets[0].subnet_id,
            # "groupSet": [web_sg.security_group_id]
        }],  # https://github.com/aws/aws-cdk/issues/3419
    )
    # core.Tag.add(blue_web_inst,key="Owner",value="KonStone",include_resource_types=[])

    # e.g. https://signin.aws.amazon.com/switchrole?roleName=teamUnicornProjectRedRole&account=...
    role_login_url = (
        f"https://signin.aws.amazon.com/switchrole?&account={accountId}"
        f"&roleName={teamUnicornProjectRedRole.role_name}")
    output1 = core.CfnOutput(self,
                             "Red-Rosy-AssumeRoleUrl",
                             value=role_login_url,
                             description="Url to login & assume role")
    output2 = core.CfnOutput(self,
                             "redRosy_user_password",
                             value=shiny_new_pass.response,
                             description="redRosy user password")
    # Publish the custom resource output.
    output3 = core.CfnOutput(
        self,
        "IAMUserTaggerResponseMessage",
        description="IAM User Tagging Successful",
        value=iamUserTaggerResp.response,
    )
    # Publish web instance public IPs.
    output4 = core.CfnOutput(
        self,
        "ProjectRed-Web-Instance",
        description="Project Red Web Instance Public IP",
        value=core.Fn.get_att(logical_name_of_resource="redWebInstance01",
                              attribute_name="PublicIp").to_string(),
    )
    output5 = core.CfnOutput(
        self,
        "ProjectBlue-Web-Instance",
        description="Project Blue Web Instance Public IP",
        value=core.Fn.get_att(logical_name_of_resource="blueWebInstance01",
                              attribute_name="PublicIp").to_string(),
    )
    output10 = core.CfnOutput(
        self,
        "Red-Rosy-User-Login-Url",
        value=(
            f"https://{core.Aws.ACCOUNT_ID}.signin.aws.amazon.com/console"
        ),
        description="The URL for Rosy to Login")
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Media-transcription stack.

    Pipeline: S3 media bucket -> S3 event notification -> SQS queue ->
    Lambda that submits Amazon Transcribe jobs, plus a DynamoDB table for
    job metadata and an IAM group giving console users scoped S3 access.
    """
    super().__init__(scope, id, **kwargs)
    # Tag all constructs with the project for easy billing drilldown,
    # filtering, and organization.
    core.Tags.of(self).add('project', 'MediaTranscription')
    # Media files bucket (server-side encryption with S3-managed keys).
    media_bucket = s3.Bucket(
        self,
        'media-transcription-bucket',
        encryption=s3.BucketEncryption.S3_MANAGED,
    )
    # SQS queue for media files bucket event notifications.
    # Messages that fail processing 3 times are moved to the DLQ.
    media_bucket_event_queue = sqs.Queue(
        self,
        'media-transcription-event-notification-queue',
        queue_name='media-transcription-event-notification-queue',
        visibility_timeout=core.Duration.seconds(60),
        dead_letter_queue=sqs.DeadLetterQueue(
            max_receive_count=3,
            queue=sqs.Queue(
                self,
                'media-transcription-event-notifications-dlq',
                queue_name='media-transcription-event-notifications-dlq',
            )),
    )
    # S3 object created notifications (only under media-input/) sent to
    # the SQS queue.
    media_bucket.add_event_notification(
        s3.EventType.OBJECT_CREATED,
        s3n.SqsDestination(media_bucket_event_queue),
        *[s3.NotificationKeyFilter(prefix='media-input/')],
    )
    # Lambda function to create/submit Transcribe jobs
    transcribe_job_init_fn = lambda_.Function(
        self,
        'transcribe-job-init-fn',
        runtime=lambda_.Runtime.PYTHON_3_8,
        code=lambda_.Code.from_asset(
            '../lambdas/transcribe-job-init-fn',
            # The following is just dumb.
            # The Lambda runtime doesn't use the latest boto3 by default.
            # In order to use the latest boto3, we have to pip install
            # and bundle locally using Docker.
            # Q: Why need the latest boto3?
            # A: https://github.com/boto/boto3/issues/2630
            # I'll have to delete the ECR containers to avoid cost.
            # TODO: Revert back to normal in like a month I guess.
            bundling={
                'image': lambda_.Runtime.PYTHON_3_8.bundling_docker_image,
                'command': [
                    'bash', '-c',
                    '\n pip install -r requirements.txt -t /asset-output &&\n cp -au . /asset-output\n '
                ]
            }),
        handler='fn.handler',
        reserved_concurrent_executions=1,  # Effectively single-threaded
    )
    # Triggered by SQS messages created for media file puts
    transcribe_job_init_fn.add_event_source(
        les.SqsEventSource(
            queue=media_bucket_event_queue,
            batch_size=5,
            enabled=True,
        ))
    # Grant access to start transcription jobs
    transcribe_job_init_fn.add_to_role_policy(
        statement=iam.PolicyStatement(
            actions=[
                'transcribe:StartTranscriptionJob',
            ],
            resources=['*'],
            effect=iam.Effect.ALLOW,
        ))
    # Grant Lambda role to read and write to input and output portions of
    # the S3 bucket.
    # Q: Why grant Lambda the permissions instead of Transcribe service?
    # A: Two-fold:
    #   - i) https://amzn.to/321Nx5I
    #   - ii) Granting just to this Lambda means other Transcribe jobs
    #     across the account cannot use this bucket (least privilege).
    media_bucket.grant_read(
        identity=transcribe_job_init_fn.grant_principal,
        objects_key_pattern='media-input/*')
    # Cannot specify a prefix for writes as Transcribe will not accept
    # a job unless it has write permission on the whole bucket.
    # Edit: The above statement was when I had to use '*' for writes. But
    # now, I granted access to that .write_access_check_file.temp
    # file and it seems to all work now?
    media_bucket.grant_write(
        identity=transcribe_job_init_fn.grant_principal,
        objects_key_pattern='transcribe-output-raw/*')
    # Transcribe probes this temp file before accepting a job, so it needs
    # an explicit write grant as well. This is just as frustrating to you
    # as it is to me.
    media_bucket.grant_write(
        identity=transcribe_job_init_fn.grant_principal,
        objects_key_pattern='.write_access_check_file.temp')
    # DynamoDB table for Jobs metadata, keyed by "Bucket-Key-ETag".
    jobs_metadata_table = ddb.Table(
        self,
        'MediaTranscription-TranscriptionJobs',
        table_name='MediaTranscription-TranscriptionJobs',
        partition_key=ddb.Attribute(
            name='Bucket-Key-ETag',
            type=ddb.AttributeType.STRING,
        ),
        billing_mode=ddb.BillingMode.PAY_PER_REQUEST,
    )
    # The Lambda only ever reads/writes single items.
    jobs_metadata_table.grant(transcribe_job_init_fn.grant_principal, *[
        'dynamodb:GetItem',
        'dynamodb:PutItem',
    ])
    # Create IAM Group with read/write permissions to S3 bucket
    # TODO: Make this more federated and robust
    console_users_group = iam.Group(self, 'MediaTranscriptionConsoleUsers')
    console_users_group.attach_inline_policy(policy=iam.Policy(
        self,
        'MediaTranscriptionConsoleUserS3Access',
        statements=[
            # List the bucket itself.
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    's3:ListBucket',
                ],
                resources=[
                    media_bucket.bucket_arn,
                ],
            ),
            # Read/write uploads under media-input/.
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    's3:GetObject',
                    's3:PutObject',
                ],
                resources=[
                    media_bucket.arn_for_objects('media-input/*'),
                ],
            ),
            # Read-only on raw transcription output.
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    's3:GetObject',
                ],
                resources=[
                    media_bucket.arn_for_objects(
                        'transcribe-output-raw/*'),
                ],
            ),
        ],
    ))
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Demo IAM stack.

    Creates a test SSM parameter, two users (one with a SecretsManager-
    generated password), a group with S3 read-only access, an assumable
    role, and a managed policy for describing EC2/CloudWatch resources.
    """
    super().__init__(scope, construct_id, **kwargs)

    # Test SSM parameter the group will later be allowed to read.
    demo_param = aws_ssm.StringParameter(
        self,
        "ssm_01",
        description="test parameter",
        parameter_name="/test/demo/iam",
        string_value="123",
        tier=aws_ssm.ParameterTier.STANDARD,
    )

    # Generated password for user01, stored in Secrets Manager.
    user01_secret = aws_secrm.Secret(
        self,
        "user01password",
        description="User01 Password",
        secret_name="user01_password",
    )

    # user01 signs in with the generated secret.
    user01 = aws_iam.User(
        self,
        "user01",
        password=user01_secret.secret_value,
        user_name="user01",
    )
    # user02 uses a literal password (demo only).
    user02 = aws_iam.User(
        self,
        "user02",
        password=core.SecretValue.plain_text("Testpassowrd123"),
        user_name="user02",
    )

    # Group membership plus AWS-managed S3 read-only access.
    test_group = aws_iam.Group(self, "test_group", group_name="test-group")
    test_group.add_user(user02)
    test_group.add_managed_policy(
        aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            "AmazonS3ReadOnlyAccess"))

    # Let the group read the demo parameter's value.
    demo_param.grant_read(test_group)

    # Inline statement so the group can enumerate parameters in the console.
    describe_params_stmt = aws_iam.PolicyStatement(
        effect=aws_iam.Effect.ALLOW,
        resources=["*"],
        actions=["ssm:DescribeParameters"],
    )
    describe_params_stmt.sid = "ListAllParametersForTestGroup"
    test_group.add_to_policy(describe_params_stmt)

    # Role assumable from within this same account.
    test_role = aws_iam.Role(
        self,
        "test_role",
        assumed_by=aws_iam.AccountPrincipal(f"{core.Aws.ACCOUNT_ID}"),
        role_name="cdk_test_role",
    )

    # Customer-managed policy attached directly to the role.
    managed_policy_01 = aws_iam.ManagedPolicy(
        self,
        "managed_policy_01_list_ec2",
        description="list ec2",
        managed_policy_name="managed_policy_01_list_ec2",
        statements=[
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                actions=[
                    "ec2:Describe*",
                    "cloudwatch:Describe*",
                    "cloudwatch:Get*",
                ],
                resources=["*"],
            )
        ],
        roles=[test_role],
    )

    # Console sign-in URL for user02.
    login_output01 = core.CfnOutput(
        self,
        "login_output01",
        description="Login URL for user02",
        value=f"https://{core.Aws.ACCOUNT_ID}.signin.aws.amazon.com/console",
    )
def __init__(self, scope: cdk.Construct, id: str, **kwargs) -> None:
    """HPC quickstart stack.

    Provisions a VPC with S3/DynamoDB gateway endpoints, data and
    CloudTrail buckets, a Cloud9 research workspace bootstrapped with AWS
    ParallelCluster via SSM-driven custom resources, IAM groups/roles, an
    optional console user, and an optional annual cost budget.
    """
    super().__init__(scope, id, **kwargs)

    # Version of ParallelCluster for Cloud9.
    pcluster_version = cdk.CfnParameter(
        self,
        'ParallelClusterVersion',
        description=
        'Specify a custom parallelcluster version. See https://pypi.org/project/aws-parallelcluster/#history for options.',
        default='2.8.0',
        type='String',
        allowed_values=get_version_list('aws-parallelcluster'))

    # S3 URI for Config file
    config = cdk.CfnParameter(
        self,
        'ConfigS3URI',
        description='Set a custom parallelcluster config file.',
        default=
        'https://notearshpc-quickstart.s3.amazonaws.com/{0}/config.ini'.
        format(__version__))

    # Password for the optional hpc-quickstart user (no_echo keeps it out
    # of the console/CLI parameter display).
    password = cdk.CfnParameter(
        self,
        'UserPasswordParameter',
        description='Set a password for the hpc-quickstart user',
        no_echo=True)

    # create a VPC; gateway endpoints keep S3/DynamoDB traffic private.
    # max_azs=99 means "use as many AZs as the region offers".
    vpc = ec2.Vpc(
        self,
        'VPC',
        cidr='10.0.0.0/16',
        gateway_endpoints={
            "S3":
            ec2.GatewayVpcEndpointOptions(
                service=ec2.GatewayVpcEndpointAwsService.S3),
            "DynamoDB":
            ec2.GatewayVpcEndpointOptions(
                service=ec2.GatewayVpcEndpointAwsService.DYNAMODB)
        },
        max_azs=99)

    # create a private and public subnet per vpc selection
    selection = vpc.select_subnets(subnet_type=ec2.SubnetType.PRIVATE)

    # Output created subnets
    for i, public_subnet in enumerate(vpc.public_subnets):
        cdk.CfnOutput(self,
                      'PublicSubnet%i' % i,
                      value=public_subnet.subnet_id)
    for i, private_subnet in enumerate(vpc.private_subnets):
        cdk.CfnOutput(self,
                      'PrivateSubnet%i' % i,
                      value=private_subnet.subnet_id)
    cdk.CfnOutput(self, 'VPCId', value=vpc.vpc_id)

    # Data bucket, plus a separate bucket for CloudTrail logs and the
    # (pre-existing) quickstart bucket imported by name.
    data_bucket = s3.Bucket(self, "DataRepository")
    cdk.CfnOutput(self, 'DataRespository', value=data_bucket.bucket_name)
    cloudtrail_bucket = s3.Bucket(self, "CloudTrailLogs")
    quickstart_bucket = s3.Bucket.from_bucket_name(self, 'QuickStartBucket',
                                                   'aws-quickstart')

    # Upload bootstrap / post-install / config scripts as CDK assets.
    bootstrap_script = assets.Asset(self,
                                    'BootstrapScript',
                                    path='scripts/bootstrap.sh')
    pcluster_post_install_script = assets.Asset(
        self,
        'PclusterPostInstallScript',
        path='scripts/post_install_script.sh')
    pcluster_config_script = assets.Asset(self,
                                          'PclusterConfigScript',
                                          path='scripts/config.ini')

    # Setup CloudTrail
    cloudtrail.Trail(self, 'CloudTrail', bucket=cloudtrail_bucket)

    # Create a Cloud9 instance.
    # Cloud9 doesn't have the ability to provide userdata, so bootstrap
    # happens later through SSM run command (custom resources below).
    cloud9_instance = cloud9.Ec2Environment(
        self,
        'ResearchWorkspace',
        vpc=vpc,
        instance_type=ec2.InstanceType(
            instance_type_identifier='c5.large'))
    cdk.CfnOutput(self,
                  'Research Workspace URL',
                  value=cloud9_instance.ide_url)

    # Create a keypair in lambda and store the private key in SecretsManager
    c9_createkeypair_role = iam.Role(
        self,
        'Cloud9CreateKeypairRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
    c9_createkeypair_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'service-role/AWSLambdaBasicExecutionRole'))
    # Add IAM permissions to the lambda role
    c9_createkeypair_role.add_to_policy(
        iam.PolicyStatement(
            actions=['ec2:CreateKeyPair', 'ec2:DeleteKeyPair'],
            resources=['*'],
        ))

    # Lambda for Cloud9 keypair
    c9_createkeypair_lambda = _lambda.Function(
        self,
        'C9CreateKeyPairLambda',
        runtime=_lambda.Runtime.PYTHON_3_6,
        handler='lambda_function.handler',
        timeout=cdk.Duration.seconds(300),
        role=c9_createkeypair_role,
        code=_lambda.Code.asset('functions/source/c9keypair'),
    )
    c9_createkeypair_provider = cr.Provider(
        self,
        "C9CreateKeyPairProvider",
        on_event_handler=c9_createkeypair_lambda)
    c9_createkeypair_cr = cfn.CustomResource(
        self,
        "C9CreateKeyPair",
        provider=c9_createkeypair_provider,
        properties={'ServiceToken': c9_createkeypair_lambda.function_arn})
    # c9_createkeypair_cr.node.add_dependency(instance_id)
    c9_ssh_private_key_secret = secretsmanager.CfnSecret(
        self,
        'SshPrivateKeySecret',
        secret_string=c9_createkeypair_cr.get_att_string('PrivateKey'))

    # The iam policy has a <REGION> parameter that needs to be replaced.
    # We do it programmatically so future versions of the synth'd stack
    # template include all regions.
    with open('iam/ParallelClusterUserPolicy.json') as json_file:
        data = json.load(json_file)
    for s in data['Statement']:
        if s['Sid'] == 'S3ParallelClusterReadOnly':
            s['Resource'] = []
            for r in region_info.RegionInfo.regions:
                s['Resource'].append(
                    'arn:aws:s3:::{0}-aws-parallelcluster*'.format(r.name))
    parallelcluster_user_policy = iam.CfnManagedPolicy(
        self,
        'ParallelClusterUserPolicy',
        policy_document=iam.PolicyDocument.from_json(data))

    # Cloud9 IAM Role (attached to the workspace instance profile below).
    cloud9_role = iam.Role(
        self,
        'Cloud9Role',
        assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'))
    cloud9_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AmazonSSMManagedInstanceCore'))
    cloud9_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name('AWSCloud9User'))
    cloud9_role.add_managed_policy(
        iam.ManagedPolicy.from_managed_policy_arn(
            self, 'AttachParallelClusterUserPolicy',
            parallelcluster_user_policy.ref))
    cloud9_role.add_to_policy(
        iam.PolicyStatement(resources=['*'],
                            actions=[
                                'ec2:DescribeInstances',
                                'ec2:DescribeVolumes', 'ec2:ModifyVolume'
                            ]))
    cloud9_role.add_to_policy(
        iam.PolicyStatement(resources=[c9_ssh_private_key_secret.ref],
                            actions=['secretsmanager:GetSecretValue']))
    cloud9_role.add_to_policy(
        iam.PolicyStatement(
            actions=["s3:Get*", "s3:List*"],
            resources=[
                "arn:aws:s3:::%s/*" % (data_bucket.bucket_name),
                "arn:aws:s3:::%s" % (data_bucket.bucket_name)
            ]))
    bootstrap_script.grant_read(cloud9_role)
    pcluster_post_install_script.grant_read(cloud9_role)
    pcluster_config_script.grant_read(cloud9_role)

    # Admin Group
    admin_group = iam.Group(self, 'AdminGroup')
    admin_group.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AdministratorAccess'))
    admin_group.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AWSCloud9Administrator'))

    # PowerUser Group
    poweruser_group = iam.Group(self, 'PowerUserGroup')
    poweruser_group.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name('PowerUserAccess'))
    poweruser_group.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AWSCloud9Administrator'))

    # HPC User, only materialized when CreateUser == "true" (condition below).
    user = iam.CfnUser(
        self,
        'Researcher',
        groups=[admin_group.node.default_child.ref],
        login_profile=iam.CfnUser.LoginProfileProperty(
            password_reset_required=True,
            password=cdk.SecretValue.cfn_parameter(password).to_string()))
    create_user = cdk.CfnParameter(self,
                                   "CreateUser",
                                   default="false",
                                   type="String",
                                   allowed_values=['true', 'false'
                                                   ]).value_as_string
    user_condition = cdk.CfnCondition(self,
                                      "UserCondition",
                                      expression=cdk.Fn.condition_equals(
                                          create_user, "true"))
    user.cfn_options.condition = user_condition
    cdk.CfnOutput(self,
                  'UserLoginUrl',
                  value="".join([
                      "https://", self.account,
                      ".signin.aws.amazon.com/console"
                  ]),
                  condition=user_condition)
    cdk.CfnOutput(self,
                  'UserName',
                  value=user.ref,
                  condition=user_condition)

    # Cloud9 Setup IAM Role, shared by the setup/bootstrap Lambdas.
    cloud9_setup_role = iam.Role(
        self,
        'Cloud9SetupRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
    cloud9_setup_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'service-role/AWSLambdaBasicExecutionRole'))
    # Allow pcluster to be run in bootstrap
    cloud9_setup_role.add_managed_policy(
        iam.ManagedPolicy.from_managed_policy_arn(
            self, 'AttachParallelClusterUserPolicySetup',
            parallelcluster_user_policy.ref))
    # Add IAM permissions to the lambda role.
    # BUG FIX: 'ec2:DesctibeVolumeAttribute' was a typo for the real
    # action 'ec2:DescribeVolumeAttribute'; the misspelled action matched
    # nothing, so that permission was silently never granted.
    cloud9_setup_role.add_to_policy(
        iam.PolicyStatement(
            actions=[
                'cloudformation:DescribeStackResources',
                'ec2:AssociateIamInstanceProfile',
                'ec2:AuthorizeSecurityGroupIngress',
                'ec2:DescribeInstances',
                'ec2:DescribeInstanceStatus',
                'ec2:DescribeInstanceAttribute',
                'ec2:DescribeIamInstanceProfileAssociations',
                'ec2:DescribeVolumes',
                'ec2:DescribeVolumeAttribute',
                'ec2:DescribeVolumesModifications',
                'ec2:DescribeVolumeStatus',
                'ssm:DescribeInstanceInformation',
                'ec2:ModifyVolume',
                'ec2:ReplaceIamInstanceProfileAssociation',
                'ec2:ReportInstanceStatus',
                'ssm:SendCommand',
                'ssm:GetCommandInvocation',
                's3:GetObject',
                'lambda:AddPermission',
                'lambda:RemovePermission',
                'events:PutRule',
                'events:DeleteRule',
                'events:PutTargets',
                'events:RemoveTargets',
                'cloud9:CreateEnvironmentMembership',
            ],
            resources=['*'],
        ))
    cloud9_setup_role.add_to_policy(
        iam.PolicyStatement(actions=['iam:PassRole'],
                            resources=[cloud9_role.role_arn]))
    cloud9_setup_role.add_to_policy(
        iam.PolicyStatement(
            actions=['lambda:AddPermission', 'lambda:RemovePermission'],
            resources=['*']))

    # Cloud9 Instance Profile
    c9_instance_profile = iam.CfnInstanceProfile(
        self, "Cloud9InstanceProfile", roles=[cloud9_role.role_name])

    # Lambda to add Instance Profile to Cloud9
    c9_instance_profile_lambda = _lambda.Function(
        self,
        'C9InstanceProfileLambda',
        runtime=_lambda.Runtime.PYTHON_3_6,
        handler='lambda_function.handler',
        timeout=cdk.Duration.seconds(900),
        role=cloud9_setup_role,
        code=_lambda.Code.asset('functions/source/c9InstanceProfile'),
    )
    c9_instance_profile_provider = cr.Provider(
        self,
        "C9InstanceProfileProvider",
        on_event_handler=c9_instance_profile_lambda,
    )
    instance_id = cfn.CustomResource(self,
                                     "C9InstanceProfile",
                                     provider=c9_instance_profile_provider,
                                     properties={
                                         'InstanceProfile':
                                         c9_instance_profile.ref,
                                         'Cloud9Environment':
                                         cloud9_instance.environment_id,
                                     })
    instance_id.node.add_dependency(cloud9_instance)

    # Lambda for Cloud9 Bootstrap
    c9_bootstrap_lambda = _lambda.Function(
        self,
        'C9BootstrapLambda',
        runtime=_lambda.Runtime.PYTHON_3_6,
        handler='lambda_function.handler',
        timeout=cdk.Duration.seconds(900),
        role=cloud9_setup_role,
        code=_lambda.Code.asset('functions/source/c9bootstrap'),
    )
    c9_bootstrap_provider = cr.Provider(
        self, "C9BootstrapProvider", on_event_handler=c9_bootstrap_lambda)
    c9_bootstrap_cr = cfn.CustomResource(
        self,
        "C9Bootstrap",
        provider=c9_bootstrap_provider,
        properties={
            'Cloud9Environment':
            cloud9_instance.environment_id,
            'BootstrapPath':
            's3://%s/%s' % (bootstrap_script.s3_bucket_name,
                            bootstrap_script.s3_object_key),
            # NOTE(review): this passes the CfnParameter object itself;
            # presumably config.value_as_string was intended — confirm
            # against the c9bootstrap Lambda's expectations.
            'Config':
            config,
            'VPCID':
            vpc.vpc_id,
            'MasterSubnetID':
            vpc.public_subnets[0].subnet_id,
            'ComputeSubnetID':
            vpc.private_subnets[0].subnet_id,
            'PostInstallScriptS3Url':
            "".join([
                's3://', pcluster_post_install_script.s3_bucket_name, "/",
                pcluster_post_install_script.s3_object_key
            ]),
            'PostInstallScriptBucket':
            pcluster_post_install_script.s3_bucket_name,
            'S3ReadWriteResource':
            data_bucket.bucket_arn,
            'S3ReadWriteUrl':
            's3://%s' % (data_bucket.bucket_name),
            'KeyPairId':
            c9_createkeypair_cr.ref,
            'KeyPairSecretArn':
            c9_ssh_private_key_secret.ref,
            'UserArn':
            user.attr_arn,
            'PclusterVersion':
            pcluster_version.value_as_string
        })
    # Bootstrap must run only after all of its inputs exist.
    c9_bootstrap_cr.node.add_dependency(instance_id)
    c9_bootstrap_cr.node.add_dependency(c9_createkeypair_cr)
    c9_bootstrap_cr.node.add_dependency(c9_ssh_private_key_secret)
    c9_bootstrap_cr.node.add_dependency(data_bucket)

    # Budgets, materialized only when EnableBudget == "true" (condition below).
    enable_budget = cdk.CfnParameter(self,
                                     "EnableBudget",
                                     default="true",
                                     type="String",
                                     allowed_values=['true', 'false'
                                                     ]).value_as_string
    budget_properties = {
        'budgetType': "COST",
        'timeUnit': "ANNUALLY",
        'budgetLimit': {
            'amount':
            cdk.CfnParameter(
                self,
                'BudgetLimit',
                description=
                'The initial budget for this project in USD ($).',
                default=2000,
                type='Number').value_as_number,
            'unit':
            "USD",
        },
        'costFilters': None,
        'costTypes': {
            'includeCredit': False,
            'includeDiscount': True,
            'includeOtherSubscription': True,
            'includeRecurring': True,
            'includeRefund': True,
            'includeSubscription': True,
            'includeSupport': True,
            'includeTax': True,
            'includeUpfront': True,
            'useAmortized': False,
            'useBlended': False,
        },
        'plannedBudgetLimits': None,
        'timePeriod': None,
    }

    # Email the given address once 80% of actual spend is reached.
    email = {
        'notification': {
            'comparisonOperator': "GREATER_THAN",
            'notificationType': "ACTUAL",
            'threshold': 80,
            'thresholdType': "PERCENTAGE",
        },
        'subscribers': [{
            'address':
            cdk.CfnParameter(
                self,
                'NotificationEmail',
                description=
                'This email address will receive billing alarm notifications when 80% of the budget limit is reached.',
                default='*****@*****.**').value_as_string,
            'subscriptionType':
            "EMAIL",
        }]
    }

    overall_budget = budgets.CfnBudget(
        self,
        "HPCBudget",
        budget=budget_properties,
        notifications_with_subscribers=[email],
    )
    overall_budget.cfn_options.condition = cdk.CfnCondition(
        self,
        "BudgetCondition",
        expression=cdk.Fn.condition_equals(enable_budget, "true"))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """KonStone IAM demo.

    Creates a user with a SecretsManager-generated password, a group with
    S3 read-only and SSM parameter permissions, and an ops role carrying
    an EC2/CloudWatch describe policy, plus a console login URL output.
    """
    super().__init__(scope, id, **kwargs)

    # Generated password for user1, kept in Secrets Manager.
    user1_secret = _secretsmanager.Secret(
        self,
        "user1Pass",
        description="Password for User1",
        secret_name="user1_pass")

    # User1 signs in with the SecretsManager-backed password.
    user1 = _iam.User(
        self,
        "user1",
        password=user1_secret.secret_value,
        user_name="user1")

    # Group that holds user1 and carries its permissions.
    konstone_group = _iam.Group(
        self, "konStoneGroup", group_name="konstone_group")
    konstone_group.add_user(user1)
    konstone_group.add_managed_policy(
        _iam.ManagedPolicy.from_aws_managed_policy_name(
            "AmazonS3ReadOnlyAccess"))

    # Two demo SSM parameters; only the first is granted to the group.
    param1 = _ssm.StringParameter(
        self,
        "parameter1",
        description="Keys To KonStone",
        parameter_name="/konstone/keys/fish",
        string_value="130481",
        tier=_ssm.ParameterTier.STANDARD)
    param2 = _ssm.StringParameter(
        self,
        "parameter2",
        description="Keys To KonStone",
        parameter_name="/konstone/keys/fish/gold",
        string_value="130482",
        tier=_ssm.ParameterTier.STANDARD)
    param1.grant_read(konstone_group)

    # Inline statement so the group can enumerate parameters in the console.
    describe_stmt = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        resources=["*"],
        actions=["ssm:DescribeParameters"])
    describe_stmt.sid = "DescribeAllParametersInConsole"
    konstone_group.add_to_policy(describe_stmt)

    # Ops role assumable from within this account.
    konstone_ops_role = _iam.Role(
        self,
        'konstoneOpsRole',
        assumed_by=_iam.AccountPrincipal(f"{core.Aws.ACCOUNT_ID}"),
        role_name="konstone_ops_role")

    # Customer-managed policy attached to the ops role.
    list_ec2_policy = _iam.ManagedPolicy(
        self,
        "listEc2Instances",
        description="list ec2 isntances in the account",
        managed_policy_name="list_ec2_policy",
        statements=[
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=[
                    "ec2:Describe*",
                    "cloudwatch:Describe*",
                    "cloudwatch:Get*"
                ],
                resources=["*"])
        ],
        roles=[konstone_ops_role])

    # Auto-generated console sign-in URL for User1.
    output_1 = core.CfnOutput(
        self,
        "user1LoginUrl",
        description="LoginUrl for User1",
        value=f"https://{core.Aws.ACCOUNT_ID}.signin.aws.amazon.com/console")
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Incident-response stack for CloudWatch/Logs access events.

    EventBridge rules on aws.logs / aws.cloudwatch events invoke a
    responder Lambda; an SNS topic emails the configured address, and
    deny policies plus an access group are pre-created for containment.
    """
    super().__init__(scope, id, **kwargs)

    # Deployment-time configuration comes from the CDK context.
    NOTIFY_EMAIL = self.node.try_get_context("notify_email")
    SLACK_WEBHOOK_URL = self.node.try_get_context("webhook_url")
    WHITE_LIST_GROUP = self.node.try_get_context("white_list_group")

    # Guard clause: without all three context values, build nothing.
    if not (NOTIFY_EMAIL and SLACK_WEBHOOK_URL and WHITE_LIST_GROUP):
        logger.error(f"Required context variables for {id} were not provided!")
        return

    # Event patterns for CloudWatch Logs and CloudWatch service events.
    logs_pattern = {"source": ["aws.logs"]}
    cloudwatch_pattern = {"source": ["aws.cloudwatch"]}
    rule1 = events.Rule(self,
                        "cdkRule1_clw",
                        description='Rule created by CLW CDK',
                        enabled=True,
                        rule_name="rule1bycdk_clw",
                        event_pattern=logs_pattern)
    rule2 = events.Rule(self,
                        "cdkRule2_clw",
                        description='Rule created by CLW CDK',
                        enabled=True,
                        rule_name="rule2bycdk_clw",
                        event_pattern=cloudwatch_pattern)

    # 3. Create response lambda and add it as a target of the rules.
    lambda_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks",
                                   "in_clw_01")
    response_lambda = _lambda.Function(
        self,
        "InClw01ResponseFunction",
        runtime=_lambda.Runtime.PYTHON_3_7,
        handler="clwUnauthAccessResponse.lambda_handler",
        code=_lambda.Code.from_asset(lambda_dir_path),
        function_name="InClw01ResponseFunction",
        environment={
            "webhook_url": SLACK_WEBHOOK_URL,
            "white_list_group": WHITE_LIST_GROUP,
        })
    # NOTE: the responder currently runs with a wildcard allow policy.
    response_lambda.add_to_role_policy(
        iam.PolicyStatement(
            actions=["*"],
            effect=iam.Effect.ALLOW,
            resources=["*"],
        ))
    rule1.add_target(event_target.LambdaFunction(response_lambda))
    rule2.add_target(event_target.LambdaFunction(response_lambda))

    # 4. Create SNS topic and subscription.
    topic = sns.Topic(self, "CDKCLWAccess", topic_name="CDKCLWAccess")
    topic.add_subscription(subs.EmailSubscription(NOTIFY_EMAIL))
    # topic.add_subscription(subs.EmailSubscription('*****@*****.**'))

    # 5. Deny policies the responder can attach to offending principals.
    deny_logs_policy = iam.ManagedPolicy(
        self,
        "InCLW01DenyPolicy1",
        managed_policy_name="ClWDenyAccess1",
        statements=[
            iam.PolicyStatement(effect=iam.Effect.DENY,
                                actions=["logs:*"],
                                resources=["*"])
        ])
    deny_cloudwatch_policy = iam.ManagedPolicy(
        self,
        "InCLW01DenyPolicy2",
        managed_policy_name="ClWDenyAccess2",
        statements=[
            iam.PolicyStatement(effect=iam.Effect.DENY,
                                actions=["cloudwatch:*"],
                                resources=["*"])
        ])

    # 6. Group for principals allowed CloudWatch access.
    clw_access_group = iam.Group(
        self, "clwAccessGroup", group_name="clwAccessGroup")
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """ABAC demo stack.

    Creates a versioned S3 bucket, three tagged IAM users in one group,
    and three assumable roles whose S3 permissions are scoped by matching
    principal tags (teamName/projectName/projectAdmin) against resource,
    object, and request tags.
    """
    super().__init__(scope, id, **kwargs)

    # Demo bucket; DESTROY removal policy since it holds test data only.
    bkt01 = s3.Bucket(
        self,
        "abacBucket",
        versioned=True,
        # encryption=s3.BucketEncryption.KMS_MANAGED,
        block_public_access=s3.BlockPublicAccess(block_public_policy=True),
        removal_policy=core.RemovalPolicy.DESTROY)

    # Bucket policy: full object access for the account root principal.
    bkt01.add_to_resource_policy(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            # actions=["s3:GetObject"],
            actions=["s3:*"],
            # resources=[bkt01.arn_for_objects("file.txt")],
            resources=[bkt01.arn_for_objects("*")],
            principals=[iam.AccountRootPrincipal()]))

    # Create 3 Users: 1 Admin & 2 Normal Users (demo-only literal passwords).
    redUser1 = iam.User(
        self,
        "redUser1",
        user_name="redUser",
        password=core.SecretValue.plain_text("redUser1SUPERDUMBpassWord"))
    blueUser1 = iam.User(
        self,
        "blueUser1",
        user_name="blueUser",
        password=core.SecretValue.plain_text("blueUser1SUPERDUMBpassWord"))
    adminUser1 = iam.User(self,
                          "adminUser1",
                          user_name="adminUser",
                          password=core.SecretValue.plain_text(
                              "adminUser1SUPERDUMBpassWord"))
    unicornGrp = iam.Group(self, "unicornGrp", group_name="unicornGroup")
    # Add Users To Group
    unicornGrp.add_user(redUser1)
    unicornGrp.add_user(blueUser1)
    unicornGrp.add_user(adminUser1)
    # blueGrp1.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("AmazonS3ReadOnlyAccess"))

    ##############################################
    # We need a custom resource to TAG IAM Users #
    ##############################################
    resource = MyCustomResource(self,
                                "iamTagger",
                                message=[{
                                    "user": redUser1.user_name,
                                    "tags": [{
                                        'Key': 'teamName',
                                        'Value': 'teamUnicorn'
                                    }, {
                                        'Key': 'projectName',
                                        'Value': 'projectRed'
                                    }]
                                }, {
                                    "user": blueUser1.user_name,
                                    "tags": [{
                                        'Key': 'teamName',
                                        'Value': 'teamUnicorn'
                                    }, {
                                        'Key': 'projectName',
                                        'Value': 'projectBlue'
                                    }]
                                }, {
                                    "user": adminUser1.user_name,
                                    "tags": [{
                                        'Key': 'teamName',
                                        'Value': 'teamUnicorn'
                                    }, {
                                        'Key': 'projectAdmin',
                                        'Value': 'yes'
                                    }]
                                }])
    # Publish the custom resource output
    core.CfnOutput(
        self,
        "ResponseMessage",
        description="The message that came back from the Custom Resource",
        value=resource.response,
    )

    # Lets Create the IAM Role to be used by the groups
    accountId = core.Aws.ACCOUNT_ID
    unicornTeamProjectRedRole = iam.Role(
        self,
        'unicornTeamProjectRedRoleId',
        assumed_by=iam.AccountPrincipal(f"{accountId}"),
        role_name="unicornTeamProjectRedRole")
    core.Tag.add(unicornTeamProjectRedRole,
                 key="teamName",
                 value="teamUnicorn")
    core.Tag.add(unicornTeamProjectRedRole,
                 key="projectName",
                 value="projectRed")
    unicornTeamProjectBlueRole = iam.Role(
        self,
        'unicornTeamProjectBlueRoleId',
        assumed_by=iam.AccountPrincipal(f"{accountId}"),
        role_name="unicornTeamProjectBlueRole")
    core.Tag.add(unicornTeamProjectBlueRole,
                 key="teamName",
                 value="teamUnicorn")
    core.Tag.add(unicornTeamProjectBlueRole,
                 key="projectName",
                 value="projectBlue")
    unicornTeamProjectAdminRole = iam.Role(
        self,
        'unicornTeamProjectAdminRoleId',
        assumed_by=iam.AccountPrincipal(f"{accountId}"),
        role_name="unicornTeamProjectAdminRole")
    core.Tag.add(unicornTeamProjectAdminRole,
                 key="teamName",
                 value="teamUnicorn")
    core.Tag.add(unicornTeamProjectAdminRole,
                 key="projectAdmin",
                 value="yes")

    # Allow Group to Assume Role (only roles whose tags match the
    # principal's own teamName/projectName tags).
    grpStmt1 = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=[f"arn:aws:iam::{accountId}:role/unicornTeamProject*"],
        actions=["sts:AssumeRole"],
        conditions={
            "StringEquals": {
                "iam:ResourceTag/teamName": "${aws:PrincipalTag/teamName}",
                "iam:ResourceTag/projectName":
                "${aws:PrincipalTag/projectName}"
            }
        })
    grpStmt1.sid = "AllowGroupMembersToAssumeRoleMatchingTeamName"
    unicornGrp.add_to_policy(grpStmt1)

    # Add Permissions to the Role
    roleStmt1 = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=["*"],
        actions=["s3:ListAllMyBuckets", "s3:HeadBucket"])
    roleStmt1.sid = "AllowGroupToSeeBucketListInTheConsole"

    roleStmt2 = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=[bkt01.bucket_arn],
        actions=["s3:ListBucket", "s3:ListBucketVersions"],
        # Below condition can be used to enable listing a particular
        # prefix in another statement
        # conditions={ "StringEquals" : { "s3:prefix":[""], "s3:delimiter":["/"] } }
    )
    roleStmt2.sid = "AllowRootLevelListingOfBucket"

    # Object reads only when the object's tags match the principal's tags.
    roleStmt3 = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=[bkt01.arn_for_objects("*")],
        actions=["s3:Get*"],
        conditions={
            "StringEquals": {
                "s3:ExistingObjectTag/teamName":
                "${aws:PrincipalTag/teamName}",
                "s3:ExistingObjectTag/projectName":
                "${aws:PrincipalTag/projectName}"
            }
        })
    roleStmt3.sid = "ReadOnlyAccessToTeams"

    # Writes only when the request tags the object with the principal's tags.
    roleStmt4 = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=[bkt01.arn_for_objects("*")],
        actions=[
            "s3:PutObject", "s3:PutObjectTagging",
            "s3:PutObjectVersionTagging"
        ],
        conditions={
            "StringEquals": {
                "s3:RequestObjectTag/teamName":
                "${aws:PrincipalTag/teamName}",
                "s3:RequestObjectTag/projectName":
                "${aws:PrincipalTag/projectName}"
            }
        })
    roleStmt4.sid = "WriteTaggedObjectOwnedByThem"

    # Full access for principals tagged projectAdmin=yes.
    # BUG FIX: the condition *key* must be the principal-tag condition key
    # itself ("aws:PrincipalTag/projectAdmin"); the previous
    # "${aws:PrincipalTag/projectAdmin}" policy variable is only valid in
    # condition *values*, so IAM treated it as a literal key that never
    # matched and the admin statement never granted anything.
    roleStmt5 = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        resources=[bkt01.bucket_arn,
                   bkt01.arn_for_objects("*")],
        actions=["s3:*"],
        conditions={
            "StringEquals": {
                "aws:PrincipalTag/projectAdmin": ["yes"]
            }
        })
    roleStmt5.sid = "FullAccessToAdminsFromSameTeam"

    unicornTeamProjectRedRole.add_to_policy(roleStmt1)
    unicornTeamProjectRedRole.add_to_policy(roleStmt2)
    unicornTeamProjectRedRole.add_to_policy(roleStmt3)
    unicornTeamProjectRedRole.add_to_policy(roleStmt4)
    unicornTeamProjectRedRole.add_to_policy(roleStmt5)
    # Add same permissions to projectBlueRole
    unicornTeamProjectBlueRole.add_to_policy(roleStmt1)
    unicornTeamProjectBlueRole.add_to_policy(roleStmt2)
    unicornTeamProjectBlueRole.add_to_policy(roleStmt3)
    unicornTeamProjectBlueRole.add_to_policy(roleStmt4)
    unicornTeamProjectBlueRole.add_to_policy(roleStmt5)
    # Add same permissions to projectAdminRole
    unicornTeamProjectAdminRole.add_to_policy(roleStmt1)
    unicornTeamProjectAdminRole.add_to_policy(roleStmt2)
    unicornTeamProjectAdminRole.add_to_policy(roleStmt3)
    unicornTeamProjectAdminRole.add_to_policy(roleStmt4)
    unicornTeamProjectAdminRole.add_to_policy(roleStmt5)
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    """IAM demo stack.

    Creates two users (one with a SecretsManager-backed password, one with
    a literal password), a group with S3 read-only access, two SSM
    parameters, an ops role with an EC2/CloudWatch describe policy, and a
    console login URL output.
    """
    super().__init__(scope, construct_id, **kwargs)

    # --- IAM Users & Groups ---
    # Create Users passwords:
    user1_pass = _secrets_manager.Secret(self,
                                         "user1Pass",
                                         description="Password for User1",
                                         secret_name="user1_pass")
    # Add user1 with SecretsManager Password:
    user1 = _iam.User(self,
                      "user1",
                      password=user1_pass.secret_value,
                      user_name="user1")
    # Add user2 with Literal Password (NOT RECOMMENDED):
    user2 = _iam.User(
        self,
        "user2",
        password=cdk.SecretValue.plain_text("Dont-Use-B@d-Passw0rds"),
        user_name="user2")

    # --- IAM Group ---
    konstone_group = _iam.Group(self,
                                "konStoneGroup",
                                group_name="konstone_group")
    # Add Users to Group:
    konstone_group.add_user(user1)
    # Add Managed Policy To Group:
    konstone_group.add_managed_policy(
        _iam.ManagedPolicy.from_aws_managed_policy_name(
            "AmazonS3ReadOnlyAccess"))

    # SSM parameter store 1:
    param1 = _ssm.StringParameter(self,
                                  "Parameter1",
                                  description="Keys to KonStone",
                                  parameter_name="/konstone/keys/fish",
                                  string_value="130481",
                                  tier=_ssm.ParameterTier.STANDARD)
    # SSM parameter store 2:
    # BUG FIX: this construct was previously assigned to `param1`,
    # shadowing Parameter1, so the "Param 1" grant below actually granted
    # read on Parameter2. It is now bound to its own name.
    param2 = _ssm.StringParameter(
        self,
        "Parameter2",
        description="Keys to KonStone",
        parameter_name="/konstone/keys/fish/gold",
        string_value="130481",
        tier=_ssm.ParameterTier.STANDARD)

    # Grant Group to LIST/describe all SSM Parameters in the console:
    group_statement_1 = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        resources=["*"],
        actions=["ssm:DescribeParameters"])
    # This's the SID in the policy
    group_statement_1.sid = "DescribeAllParametersInTheConsole"
    # Add policy to the group:
    konstone_group.add_to_policy(group_statement_1)

    # Create IAM Role (assumable from within this account):
    konstone_ops_role = _iam.Role(
        self,
        "konstoneOpsRole",
        assumed_by=_iam.AccountPrincipal(f"{cdk.Aws.ACCOUNT_ID}"),
        role_name="konstone_ops_role")

    # Create Managed Policy & attach it to the role:
    list_ec2_policy = _iam.ManagedPolicy(
        self,
        "listEc2Instances",
        description="List ec2 instances in the account",
        managed_policy_name="list_ec2_policy",
        statements=[
            _iam.PolicyStatement(effect=_iam.Effect.ALLOW,
                                 actions=[
                                     "ec2:Describe*",
                                     "cloudwatch:Describe*",
                                     "cloudwatch:Get*"
                                 ],
                                 resources=["*"])
        ],
        roles=[konstone_ops_role])

    # Grant Konstone group permission to Param 1 (now actually Parameter1
    # thanks to the shadowing fix above):
    param1.grant_read(konstone_group)

    # Auto-generated console sign-in URL for user2:
    output_1 = cdk.CfnOutput(
        self,
        "user2Login",
        description="LoginUrl for User2",
        value=f"https://{cdk.Aws.ACCOUNT_ID}.signin.aws.amazon.com/console"
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """IAM demo.

    Creates two users (one secret-backed password, one literal), a group
    with S3 read-only plus SSM parameter permissions, and an ops role
    carrying an EC2/CloudWatch describe policy.
    """
    super().__init__(scope, id, **kwargs)

    # Secret-backed password for user 1.
    user1_secret = _secretsmanager.Secret(
        self,
        "user1Pass",
        description="Password for user 1",
        secret_name="user1_pass",
    )
    first_user = _iam.User(
        self,
        "user1",
        password=user1_secret.secret_value,
        user_name="user1",
    )

    # User 2 with a literal password (demo only).
    second_user = _iam.User(
        self,
        "user2",
        password=core.SecretValue("dont-use-bad-password@123"),
        user_name="user2",
    )

    # Group containing user 2, with AWS-managed S3 read-only access.
    demo_group = _iam.Group(self, "group1Id", group_name="group1")
    demo_group.add_user(second_user)
    demo_group.add_managed_policy(
        _iam.ManagedPolicy.from_aws_managed_policy_name(
            "AmazonS3ReadOnlyAccess"))

    # Inline grant scoped to a single SSM parameter.
    demo_param = _ssm.StringParameter(
        self,
        "parameterId",
        description="parameter",
        parameter_name="/foo",
        string_value="bar",
        tier=_ssm.ParameterTier.STANDARD,
    )
    demo_param.grant_read(demo_group)

    # Inline statement so the group can list parameters in the console.
    describe_stmt = _iam.PolicyStatement(
        sid="DescribeAllParameters",
        effect=_iam.Effect.ALLOW,
        resources=["*"],
        actions=["ssm:DescribeParameters"],
    )
    demo_group.add_to_policy(describe_stmt)

    # Ops role assumable from within this account.
    ops_role = _iam.Role(
        self,
        "opsRole",
        assumed_by=_iam.AccountPrincipal(f"{core.Aws.ACCOUNT_ID}"),
        role_name="ops_role",
    )
    ec2_list_policy = _iam.ManagedPolicy(
        self,
        "listEc2Instances",
        description="list ec2 instances in the account",
        managed_policy_name="list_ec2_policy",
        statements=[
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=[
                    "ec2:Describe*",
                    "cloudwatch:Describe*",
                    "cloudwatch:Get*",
                ],
                resources=["*"],
            )
        ],
        roles=[ops_role],
    )

    # Auto-generated console sign-in URL for user 2.
    login_output = core.CfnOutput(
        self,
        "user2LoginUrl",
        description="Login for user 2",
        value=f"https://{core.Aws.ACCOUNT_ID}.signin.aws.amazon.com/console",
    )