def s3_solutions_access(self):
    # Note: "s3:ListObjects" was dropped; it is not a valid IAM action, and
    # listing is already covered by "s3:ListBucket".
    return iam.PolicyDocument(
        statements=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "s3:GetObject",
                    "s3:ListBucket",
                ],
                resources=[
                    # Objects in the regionalized solutions bucket
                    Fn.sub(
                        "arn:${AWS::Partition}:s3:::${bucket}-${AWS::Region}/*",
                        variables={"bucket": Fn.find_in_map("SourceCode", "General", "S3Bucket")},
                    ),
                    # The bucket itself (required for ListBucket)
                    Fn.sub(
                        "arn:${AWS::Partition}:s3:::${bucket}-${AWS::Region}",
                        variables={"bucket": Fn.find_in_map("SourceCode", "General", "S3Bucket")},
                    ),
                ],
            )
        ]
    )
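# A minimal usage sketch (construct names here are illustrative, not from the
# original source): attach the returned document to a role as an inline policy.
def attach_s3_solutions_access(self):
    notebook_role = iam.Role(
        self, "NotebookRole",
        assumed_by=iam.ServicePrincipal("sagemaker.amazonaws.com"))
    iam.Policy(
        self, "S3SolutionsAccess",
        document=self.s3_solutions_access(),
        roles=[notebook_role])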
def _create_user_pool(self, domain_prefix: str) -> CfnUserPool:
    # UserPool will unnecessarily create a role for SMS sending: https://github.com/aws/aws-cdk/issues/6943
    # But such a role is required by CloudFormation to enable MFA, even if it is OTP-only: https://github.com/awsdocs/aws-cloudformation-user-guide/issues/73
    # It is also not actually possible to configure OTP-only MFA once an SMS configuration is present.
    user_pool = UserPool(
        self,
        'UserPool',
        mfa=Mfa.REQUIRED,
        mfa_second_factor=MfaSecondFactor(otp=True, sms=True),
        user_pool_name='cognito-demo',
        self_sign_up_enabled=True)
    user_pool_cfn = user_pool.node.default_child

    user_pool_domain = CfnUserPoolDomain(
        self, 'CognitoDomain', domain=domain_prefix, user_pool_id=user_pool_cfn.ref)

    CfnOutput(self, 'PROVIDER_NAME', value=user_pool_cfn.attr_provider_name)

    domain_name = Fn.join(
        '.', [user_pool_domain.ref, 'auth', Aws.REGION, 'amazoncognito.com'])
    CfnOutput(self, 'COGNITO_URL', value=Fn.join('', ['https://', domain_name]))
    return user_pool_cfn
def _add_imagebuilder_image(self, build_tags, lambda_cleanup_policy_statements):
    # ImageBuilderImage
    image_resource = imagebuilder.CfnImage(
        self,
        IMAGEBUILDER_RESOURCE_NAME_PREFIX,
        tags=build_tags,
        image_recipe_arn=Fn.ref("ImageRecipe"),
        infrastructure_configuration_arn=Fn.ref("InfrastructureConfiguration"),
        distribution_configuration_arn=Fn.ref("DistributionConfiguration"),
        enhanced_image_metadata_enabled=False,
    )

    if not self.custom_cleanup_lambda_role:
        self._add_resource_delete_policy(
            lambda_cleanup_policy_statements,
            ["imagebuilder:DeleteImage"],
            [
                self.format_arn(
                    service="imagebuilder",
                    resource="image",
                    resource_name="{0}/*".format(self._build_image_recipe_name(to_lower=True)),
                )
            ],
        )

    return image_resource
def _add_imagebuilder_infrastructure_configuration(
    self, build_tags, instance_profile_name, lambda_cleanup_policy_statements
):
    # ImageBuilderInfrastructureConfiguration
    infrastructure_configuration_resource = imagebuilder.CfnInfrastructureConfiguration(
        self,
        "InfrastructureConfiguration",
        name=self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX),
        tags=build_tags,
        instance_profile_name=instance_profile_name or Fn.ref("InstanceProfile"),
        terminate_instance_on_failure=self.config.dev_settings.terminate_instance_on_failure
        if self.config.dev_settings and self.config.dev_settings.terminate_instance_on_failure is not None
        else True,
        instance_types=[self.config.build.instance_type],
        security_group_ids=self.config.build.security_group_ids,
        subnet_id=self.config.build.subnet_id,
        sns_topic_arn=Fn.ref("BuildNotificationTopic"),
    )

    if not self.custom_cleanup_lambda_role:
        self._add_resource_delete_policy(
            lambda_cleanup_policy_statements,
            ["imagebuilder:DeleteInfrastructureConfiguration"],
            [
                self.format_arn(
                    service="imagebuilder",
                    resource="infrastructure-configuration",
                    resource_name=self._build_resource_name(
                        IMAGEBUILDER_RESOURCE_NAME_PREFIX, to_lower=True),
                )
            ],
        )

    return infrastructure_configuration_resource
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    env = "dev"
    project = "testproject1"
    service = "api"
    component = "buckets"

    Tag.add(self, "Service", service)
    Tag.add(self, "Component", component)

    bucket_a = aws_s3.Bucket(
        self,
        "BucketA",
        bucket_name=generate_resource_name(project, env, service, component, "bucketa"),
        removal_policy=RemovalPolicy.DESTROY,
    )
    bucket_b = aws_s3.Bucket(
        self,
        "BucketB",
        bucket_name=generate_resource_name(project, env, service, component, "bucketb"),
        removal_policy=RemovalPolicy.DESTROY,
    )

    value = Fn.sub(
        "test: ${value_to_import}",
        {
            "value_to_import": Fn.import_value(
                generate_resource_name(project, env, "etl", component, "bucketb"))
        },
    )
    aws_ssm.StringParameter(
        self,
        "SSMParam",
        parameter_name=generate_resource_name(project, env, service, component, "ssmparam"),
        string_value=value,
    )

    core.CfnOutput(
        self,
        id="OutputBucketA",
        value=bucket_a.bucket_name,
        export_name=generate_resource_name(project, env, service, component, "bucketa"),
    )
    core.CfnOutput(
        self,
        id="OutputBucketB",
        value=bucket_b.bucket_name,
        export_name=generate_resource_name(project, env, service, component, "bucketb"),
    )
def get_notebook_prefix(self):
    if self._is_solution_build():
        prefix = Fn.sub(
            "${prefix}/notebooks",
            variables={"prefix": Fn.find_in_map("SourceCode", "General", "KeyPrefix")},
        )
    else:
        prefix = "notebooks"
    return Fn.base64(prefix)
def get_file_system(scope: Construct) -> FileSystem:
    config = get_volume_config()
    stack_name = config.stack_name
    security_group = SecurityGroup.from_security_group_id(
        scope,
        'nfs_security_group',
        security_group_id=Fn.import_value(stack_name + 'SecurityGroupId'))
    return FileSystem.from_file_system_attributes(
        scope,
        'filesystem',
        file_system_id=Fn.import_value(stack_name + 'FileSystemId'),
        security_group=security_group)
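# For the Fn.import_value calls above to resolve, the owning stack must export
# outputs under matching names. A hypothetical sketch of the exporting side
# (construct IDs are assumptions; CfnOutput comes from aws_cdk.core):
def export_file_system_attributes(scope: Construct, stack_name: str,
                                  file_system: FileSystem,
                                  security_group: SecurityGroup) -> None:
    CfnOutput(scope, 'FileSystemIdOutput',
              value=file_system.file_system_id,
              export_name=stack_name + 'FileSystemId')
    CfnOutput(scope, 'SecurityGroupIdOutput',
              value=security_group.security_group_id,
              export_name=stack_name + 'SecurityGroupId')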
def get_notebook_source(self, data_bucket: IBucket):
    if self._is_solution_build():
        notebook_source_bucket = Fn.sub(
            "${bucket}-${region}",
            variables={
                "bucket": Fn.find_in_map("SourceCode", "General", "S3Bucket"),
                "region": Aws.REGION,
            },
        )
    else:
        notebook_source_bucket = data_bucket.bucket_name
    return Fn.base64(notebook_source_bucket)
def _create_identity_pool(self, user_pool: CfnUserPool, client: CfnUserPoolClient) -> CfnIdentityPool:
    cognito_provider = CfnIdentityPool.CognitoIdentityProviderProperty(
        client_id=client.ref,
        provider_name=user_pool.attr_provider_name,
        server_side_token_check=True)

    identity_pool = CfnIdentityPool(
        self,
        'IdentityPool',
        allow_unauthenticated_identities=False,
        identity_pool_name='cognito-demo',
        cognito_identity_providers=[cognito_provider])

    CfnIdentityPoolRoleAttachment(
        self,
        'IdentityPoolRoleAttachment',
        identity_pool_id=identity_pool.ref,
        roles={},
        role_mappings={
            'cognito-user-pool': CfnIdentityPoolRoleAttachment.RoleMappingProperty(
                type='Token',
                ambiguous_role_resolution='Deny',
                identity_provider=Fn.join(':', [user_pool.attr_provider_name, client.ref]))
        })

    CfnOutput(self, 'IDENTITY_POOL_ID', value=identity_pool.ref)
    return identity_pool
def _add_instance_profile(self, cleanup_policy_statements, instance_role=None):
    """Set default instance profile in imagebuilder cfn template."""
    instance_profile_resource = iam.CfnInstanceProfile(
        self,
        "InstanceProfile",
        path=IAM_ROLE_PATH,
        roles=[instance_role.split("/")[-1] if instance_role else Fn.ref("InstanceRole")],
        instance_profile_name=self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX),
    )

    if not self.custom_cleanup_lambda_role:
        self._add_resource_delete_policy(
            cleanup_policy_statements,
            ["iam:DeleteInstanceProfile"],
            [
                self.format_arn(
                    service="iam",
                    region="",
                    resource="instance-profile",
                    resource_name="{0}/{1}".format(
                        IAM_ROLE_PATH.strip("/"),
                        self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX),
                    ),
                )
            ],
        )

    return instance_profile_resource
def __init__(self, scope: cdk.Construct, id: str, **kwargs):
    super().__init__(scope, id, **kwargs)

    bucket_name = CfnParameter(self, "BucketName")
    self.template_options.metadata = {
        'AWS::CloudFormation::Interface': {
            'ParameterGroups': [{
                'Label': {'default': 'Bucket Configuration'},
                'Parameters': [bucket_name.logical_id]
            }],
            'ParameterLabels': {
                bucket_name.logical_id: {
                    'default': 'Which name should the bucket have'
                }
            }
        }
    }

    bucket = Bucket(self, 'test-bucket', bucket_name=bucket_name.value_as_string)
    CfnOutput(self, 'S3Id', value=bucket.bucket_arn,
              export_name=Fn.sub('${AWS::StackName}-S3Id'))
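# A consuming stack could then resolve the export by its concrete name
# (the stack name below is illustrative):
# bucket_arn = Fn.import_value('my-bucket-stack-S3Id')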
def __create_s3_trigger_lambda_invoke_permission(
        self, bucket_name: str, s3_trigger_lambda_function: aws_lambda.Function
) -> aws_lambda.Permission:
    return aws_lambda.CfnPermission(
        self,
        'S3TriggerLambdaInvokePermission',
        function_name=s3_trigger_lambda_function.function_name,
        action='lambda:InvokeFunction',
        principal='s3.amazonaws.com',
        source_account=Fn.ref('AWS::AccountId'),
        # Note: the f-string hardcodes the "aws" partition in the bucket ARN.
        source_arn=f'arn:aws:s3:::{bucket_name}')
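# A partition-aware variant (a sketch, assuming `Aws` is imported from
# aws_cdk.core) avoids hardcoding "aws", so the permission also resolves in
# the GovCloud and China partitions:
# source_arn=f'arn:{Aws.PARTITION}:s3:::{bucket_name}'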
def get_cluster(scope: Construct, vpc: Vpc) -> Cluster:
    config = get_cluster_config()
    stack_name = config.stack_name
    return Cluster.from_cluster_attributes(
        scope,
        'cluster',
        cluster_name=Fn.import_value(stack_name + 'ClusterName'),
        vpc=vpc,
        has_ec2_capacity=False,
        security_groups=[],
    )
def _add_custom_components(self, components, policy_statements, components_resources):
    """Set custom components in imagebuilder cfn template."""
    initial_components_len = len(components)
    arn_components_len = 0
    for custom_component in self.config.build.components:
        custom_components_len = len(components) - initial_components_len - arn_components_len
        if custom_component.type == "arn":
            components.append(
                imagebuilder.CfnImageRecipe.ComponentConfigurationProperty(
                    component_arn=custom_component.value))
            arn_components_len += 1
        else:
            component_script_name = custom_component.value.split("/")[-1]
            component_id = "ScriptComponent" + str(custom_components_len)
            custom_component_resource = imagebuilder.CfnComponent(
                self,
                component_id,
                name=self._build_resource_name(
                    IMAGEBUILDER_RESOURCE_NAME_PREFIX + "-Script-{0}".format(custom_components_len)),
                version=utils.get_installed_version(base_version_only=True),
                description="This component is a custom component for a script; "
                "script name is {0}, script url is {1}".format(
                    component_script_name, custom_component.value),
                platform="Linux",
                data=wrap_script_to_component(custom_component.value),
            )
            components.append(
                imagebuilder.CfnImageRecipe.ComponentConfigurationProperty(
                    component_arn=Fn.ref(component_id)))
            components_resources.append(custom_component_resource)

            if not self.custom_cleanup_lambda_role:
                self._add_resource_delete_policy(
                    policy_statements,
                    ["imagebuilder:DeleteComponent"],
                    [
                        self.format_arn(
                            service="imagebuilder",
                            resource="component",
                            resource_name="{0}/*".format(
                                self._build_resource_name(
                                    IMAGEBUILDER_RESOURCE_NAME_PREFIX
                                    + "-Script-{0}".format(custom_components_len),
                                    to_lower=True,
                                )),
                        )
                    ],
                )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # IAM section
    admin_role = iam.Role(
        self, "admin", assumed_by=iam.AccountPrincipal(Fn.ref("AWS::AccountId")))
    self.dev_role = iam.Role(
        self, "developer", assumed_by=iam.AccountPrincipal(Fn.ref("AWS::AccountId")))
    admin_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name("AdministratorAccess"))
    self.dev_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name("ReadOnlyAccess"))

    # VPC section
    self.vpc = ec2.Vpc(
        self,
        "vpc",
        cidr="10.0.0.0/16",
        enable_dns_hostnames=True,
        enable_dns_support=True,
        max_azs=3,
        nat_gateways=1,
        subnet_configuration=[
            ec2.SubnetConfiguration(
                name="Public", subnet_type=ec2.SubnetType.PUBLIC, cidr_mask=24),
            ec2.SubnetConfiguration(
                name="Private", subnet_type=ec2.SubnetType.PRIVATE, cidr_mask=24),
        ])
def __init__(self, scope: App, id: str, envs: EnvSettings, components: ComponentsStack):
    super().__init__(scope, id)

    self.backend_domain_name = StringParameter.from_string_parameter_name(
        self, "DomainNameParameter",
        string_parameter_name="/schema-cms-app/DOMAIN_NAME").string_value
    self.backend_url = f"https://{self.backend_domain_name}/api/v1/"
    self.job_processing_queues = components.data_processing_queues

    self.app_bucket = Bucket.from_bucket_arn(
        self,
        id="App",
        bucket_arn=Fn.import_value(ApiStack.get_app_bucket_arn_output_export_name(envs)))
    self.resize_lambda_image_bucket = Bucket.from_bucket_arn(
        self,
        id="Images",
        bucket_arn=Fn.import_value(
            ImageResizeStack.get_image_resize_bucket_arn_output_export_name(envs)),
    )
    self.lambda_auth_token = Secret.from_secret_arn(
        self,
        id="lambda-auth-token",
        secret_arn=Fn.import_value(
            ApiStack.get_lambda_auth_token_arn_output_export_name(envs)),
    )

    self.functions = [
        self._create_lambda_fn(envs, memory_size, queue)
        for memory_size, queue in zip(envs.lambdas_sizes, self.job_processing_queues)
    ]
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    env = "dev"
    project = "testproject1"
    service = "etl"
    component = "workflow"

    Tag.add(self, "Service", service)
    Tag.add(self, "Component", component)

    param_dwh = CfnParameter(
        self,
        "ParamDWH",
        type="String",
        description="The domain of the DWH to connect to. | team=data,service=dwh",
        default="fakedwh.host",
    )

    value_raw = "import: ${value_to_import}, param: ${param_dwh}"
    value = Fn.sub(
        value_raw,
        {
            "value_to_import": Fn.import_value(
                generate_resource_name(project, env, service, "buckets", "bucketb")),
            "param_dwh": Fn.ref(param_dwh.logical_id),
        },
    )
    aws_ssm.StringParameter(
        self,
        "SSMParam",
        parameter_name=generate_resource_name(project, env, service, component, "ssmparam"),
        string_value=value,
    )
def create_function(self, other_stack: Stack, id, *, code: AssetCode, handler: str,
                    runtime: Runtime) -> IVersion:
    func = Function(
        self,
        id,
        code=code,
        handler=handler,
        runtime=runtime,
        role=self._role,
    )

    # If the code or runtime changes, CDK doesn't re-evaluate the version. As a
    # result, we would keep referencing an old version and things wouldn't work.
    # But we also don't want to generate a new version on every run. The
    # compromise: derive the version name from the sha256 hash of the index file.
    with open(f"{code.path}/index.js", "rb") as f:
        sha256 = hashlib.sha256(f.read()).hexdigest()
    version = func.add_version(f"Version-{sha256}")

    # Create an entry in the parameter store that records the ARN of this lambda
    parameter_name = parameter_store.get_parameter_name(f"/LambdaEdge/{id}")
    StringParameter(
        self,
        parameter_name,
        string_value=Fn.join(":", [func.function_arn, version.version]),
        parameter_name=parameter_name,
    )

    other_stack.add_dependency(self)

    # Create a custom resource that fetches the ARN of the lambda
    cross_region_func = LambdaEdgeFunction(
        other_stack,
        f"LambdaEdgeFunction-{sha256}",
        parameter_name=parameter_name,
        policy=AwsCustomResourcePolicy.from_sdk_calls(
            resources=AwsCustomResourcePolicy.ANY_RESOURCE),
    )
    # Create the lambda function reference based on this ARN
    return Version.from_version_arn(other_stack, id, cross_region_func.get_arn())
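# Hypothetical usage (stack, asset, and handler names are illustrative):
# publish the function from the us-east-1 edge stack, then reference the
# resolved version from the stack that defines the CloudFront distribution.
# version = edge_stack.create_function(
#     cloudfront_stack, 'HeaderRewrite',
#     code=AssetCode('lambda/header-rewrite'),
#     handler='index.handler',
#     runtime=Runtime.NODEJS_12_X)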
def get_vpc(scope: Construct) -> Vpc:
    config = get_cluster_config()
    stack_name = config.stack_name
    return Vpc.from_vpc_attributes(
        scope,
        'vpc',
        vpc_id=Fn.import_value(stack_name + 'VpcId'),
        vpc_cidr_block=Fn.import_value(stack_name + 'VpcCidrBlock'),
        availability_zones=[
            Fn.import_value(stack_name + 'AvailabilityZone0'),
            Fn.import_value(stack_name + 'AvailabilityZone1'),
        ],
        public_subnet_ids=[
            Fn.import_value(stack_name + 'PublicSubnetId0'),
            Fn.import_value(stack_name + 'PublicSubnetId1'),
        ],
        isolated_subnet_ids=[
            Fn.import_value(stack_name + 'IsolatedSubnet0'),
            Fn.import_value(stack_name + 'IsolatedSubnet1'),
        ],
    )
def _stack_unique_id(self):
    return Fn.select(2, Fn.split("/", Stack.of(self).stack_id))
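# For intuition: a stack ID is an ARN such as (example value)
#   arn:aws:cloudformation:us-east-1:123456789012:stack/MyStack/1a2b3c4d-5e6f
# so splitting on "/" puts the unique suffix after the stack name at index 2.
# The plain-Python equivalent of the deploy-time expression above:
def _stack_unique_id_local(stack_id: str) -> str:
    return stack_id.split("/")[2]  # e.g. "1a2b3c4d-5e6f"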
def _add_compute_resource_launch_template(
    self,
    queue,
    compute_resource,
    instance_type,
    queue_pre_install_action,
    queue_post_install_action,
    queue_lt_security_groups,
    queue_placement_group,
):
    # LT network interfaces
    compute_lt_nw_interfaces = [
        ec2.CfnLaunchTemplate.NetworkInterfaceProperty(
            device_index=0,
            # Parameter not supported for instance types with multiple network interfaces
            associate_public_ip_address=queue.networking.assign_public_ip
            if compute_resource.max_network_interface_count == 1
            else None,
            interface_type="efa" if compute_resource.efa and compute_resource.efa.enabled else None,
            groups=queue_lt_security_groups,
            subnet_id=queue.networking.subnet_ids[0],
        )
    ]
    for device_index in range(1, compute_resource.max_network_interface_count):
        compute_lt_nw_interfaces.append(
            ec2.CfnLaunchTemplate.NetworkInterfaceProperty(
                device_index=device_index,
                network_card_index=device_index,
                interface_type="efa" if compute_resource.efa and compute_resource.efa.enabled else None,
                groups=queue_lt_security_groups,
                subnet_id=queue.networking.subnet_ids[0],
            )
        )

    instance_market_options = None
    if queue.capacity_type == CapacityType.SPOT:
        instance_market_options = ec2.CfnLaunchTemplate.InstanceMarketOptionsProperty(
            market_type="spot",
            spot_options=ec2.CfnLaunchTemplate.SpotOptionsProperty(
                spot_instance_type="one-time",
                instance_interruption_behavior="terminate",
                max_price=None if compute_resource.spot_price is None else str(compute_resource.spot_price),
            ),
        )

    ec2.CfnLaunchTemplate(
        self.stack_scope,
        f"ComputeServerLaunchTemplate{create_hash_suffix(queue.name + instance_type)}",
        launch_template_name=f"{self.stack_name}-{queue.name}-{instance_type}",
        launch_template_data=ec2.CfnLaunchTemplate.LaunchTemplateDataProperty(
            instance_type=instance_type,
            cpu_options=ec2.CfnLaunchTemplate.CpuOptionsProperty(
                core_count=compute_resource.vcpus, threads_per_core=1)
            if compute_resource.pass_cpu_options_in_launch_template
            else None,
            block_device_mappings=get_block_device_mappings(
                queue.compute_settings.local_storage, self.config.image.os),
            # key_name=,
            network_interfaces=compute_lt_nw_interfaces,
            placement=ec2.CfnLaunchTemplate.PlacementProperty(group_name=queue_placement_group),
            image_id=self.config.image_dict[queue.name],
            ebs_optimized=compute_resource.is_ebs_optimized,
            iam_instance_profile=ec2.CfnLaunchTemplate.IamInstanceProfileProperty(
                name=self.instance_profiles[queue.name]),
            instance_market_options=instance_market_options,
            user_data=Fn.base64(
                Fn.sub(
                    get_user_data_content("../resources/compute_node/user_data.sh"),
                    {
                        **{
                            "EnableEfa": "efa"
                            if compute_resource.efa and compute_resource.efa.enabled
                            else "NONE",
                            "RAIDOptions": get_shared_storage_options_by_type(
                                self.shared_storage_options, SharedStorageType.RAID),
                            "DisableHyperThreadingManually": "true"
                            if compute_resource.disable_simultaneous_multithreading_manually
                            else "false",
                            "BaseOS": self.config.image.os,
                            "PreInstallScript": queue_pre_install_action.script
                            if queue_pre_install_action
                            else "NONE",
                            "PreInstallArgs": join_shell_args(queue_pre_install_action.args)
                            if queue_pre_install_action and queue_pre_install_action.args
                            else "NONE",
                            "PostInstallScript": queue_post_install_action.script
                            if queue_post_install_action
                            else "NONE",
                            "PostInstallArgs": join_shell_args(queue_post_install_action.args)
                            if queue_post_install_action and queue_post_install_action.args
                            else "NONE",
                            "EFSId": get_shared_storage_ids_by_type(
                                self.shared_storage_mappings, SharedStorageType.EFS),
                            "EFSOptions": get_shared_storage_options_by_type(
                                self.shared_storage_options, SharedStorageType.EFS),  # FIXME
                            "FSXId": get_shared_storage_ids_by_type(
                                self.shared_storage_mappings, SharedStorageType.FSX),
                            "FSXMountName": self.shared_storage_attributes[SharedStorageType.FSX].get(
                                "MountName", ""),
                            "FSXDNSName": self.shared_storage_attributes[SharedStorageType.FSX].get(
                                "DNSName", ""),
                            "FSXOptions": get_shared_storage_options_by_type(
                                self.shared_storage_options, SharedStorageType.FSX),
                            "Scheduler": self.config.scheduling.scheduler,
                            "EphemeralDir": queue.compute_settings.local_storage.ephemeral_volume.mount_dir
                            if queue.compute_settings
                            and queue.compute_settings.local_storage
                            and queue.compute_settings.local_storage.ephemeral_volume
                            else "/scratch",
                            "EbsSharedDirs": get_shared_storage_options_by_type(
                                self.shared_storage_options, SharedStorageType.EBS),
                            "ClusterDNSDomain": str(self.cluster_hosted_zone.name)
                            if self.cluster_hosted_zone
                            else "",
                            "ClusterHostedZone": str(self.cluster_hosted_zone.ref)
                            if self.cluster_hosted_zone
                            else "",
                            "OSUser": OS_MAPPING[self.config.image.os]["user"],
                            "DynamoDBTable": self.dynamodb_table.ref,
                            "LogGroupName": self.log_group.log_group_name
                            if self.config.monitoring.logs.cloud_watch.enabled
                            else "NONE",
                            "IntelHPCPlatform": "true" if self.config.is_intel_hpc_platform_enabled else "false",
                            "CWLoggingEnabled": "true" if self.config.is_cw_logging_enabled else "false",
                            "QueueName": queue.name,
                            "EnableEfaGdr": "compute"
                            if compute_resource.efa and compute_resource.efa.gdr_support
                            else "NONE",
                            "CustomNodePackage": self.config.custom_node_package or "",
                            "CustomAwsBatchCliPackage": self.config.custom_aws_batch_cli_package or "",
                            "ExtraJson": self.config.extra_chef_attributes,
                        },
                        **get_common_user_data_env(queue, self.config),
                    },
                )
            ),
            monitoring=ec2.CfnLaunchTemplate.MonitoringProperty(enabled=False),
            tag_specifications=[
                ec2.CfnLaunchTemplate.TagSpecificationProperty(
                    resource_type="instance",
                    tags=get_default_instance_tags(
                        self.stack_name, self.config, compute_resource, "Compute",
                        self.shared_storage_mappings)
                    + [CfnTag(key=PCLUSTER_QUEUE_NAME_TAG, value=queue.name)]
                    + get_custom_tags(self.config),
                ),
                ec2.CfnLaunchTemplate.TagSpecificationProperty(
                    resource_type="volume",
                    tags=get_default_volume_tags(self.stack_name, "Compute")
                    + [CfnTag(key=PCLUSTER_QUEUE_NAME_TAG, value=queue.name)]
                    + get_custom_tags(self.config),
                ),
            ],
        ),
    )
def _add_default_instance_role(self, cleanup_policy_statements, build_tags):
    """Set default instance role in imagebuilder cfn template."""
    managed_policy_arns = [
        Fn.sub("arn:${AWS::Partition}:iam::aws:policy/AmazonSSMManagedInstanceCore"),
        Fn.sub("arn:${AWS::Partition}:iam::aws:policy/EC2InstanceProfileForImageBuilder"),
    ]
    if self.config.build.iam and self.config.build.iam.additional_iam_policies:
        for policy in self.config.build.iam.additional_iam_policy_arns:
            managed_policy_arns.append(policy)

    instancerole_policy_document = iam.PolicyDocument(
        statements=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[
                    self.format_arn(
                        service="ec2",
                        account="",
                        resource="image",
                        resource_name="*",
                    )
                ],
                actions=["ec2:CreateTags", "ec2:ModifyImageAttribute"],
            )
        ])

    if self.config.build.components:
        for custom_component in self.config.build.components:
            # Check that the custom component is a script and that its url is an S3 url
            if custom_component.type == "script" and utils.get_url_scheme(custom_component.value) == "s3":
                bucket_info = parse_bucket_url(custom_component.value)
                bucket_name = bucket_info.get("bucket_name")
                object_key = bucket_info.get("object_key")
                instancerole_policy_document.add_statements(
                    iam.PolicyStatement(
                        actions=["s3:GetObject"],
                        effect=iam.Effect.ALLOW,
                        resources=[
                            self.format_arn(
                                region="",
                                service="s3",
                                account="",
                                resource=bucket_name,
                                resource_name=object_key,
                            )
                        ],
                    ),
                )

    instancerole_policy = iam.CfnRole.PolicyProperty(
        policy_name="InstanceRoleInlinePolicy",
        policy_document=instancerole_policy_document,
    )

    instance_role_resource = iam.CfnRole(
        self,
        "InstanceRole",
        path=IAM_ROLE_PATH,
        managed_policy_arns=managed_policy_arns,
        assume_role_policy_document=get_assume_role_policy_document(
            "ec2.{0}".format(self.url_suffix)),
        policies=[instancerole_policy],
        tags=build_tags,
        role_name=self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX),
    )

    if not self.custom_cleanup_lambda_role:
        self._add_resource_delete_policy(
            cleanup_policy_statements,
            ["iam:DeleteRole"],
            [
                self.format_arn(
                    service="iam",
                    region="",
                    resource="role",
                    resource_name="{0}/{1}".format(
                        IAM_ROLE_PATH.strip("/"),
                        self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX),
                    ),
                )
            ],
        )

    return instance_role_resource
def _add_lambda_cleanup(self, policy_statements, build_tags):
    lambda_cleanup_execution_role = None
    if self.custom_cleanup_lambda_role:
        execution_role = self.custom_cleanup_lambda_role
    else:
        # LambdaCleanupPolicies
        self._add_resource_delete_policy(
            policy_statements,
            ["cloudformation:DeleteStack"],
            [
                self.format_arn(
                    service="cloudformation",
                    resource="stack",
                    resource_name="{0}/{1}".format(self.image_id, self._stack_unique_id()),
                )
            ],
        )
        self._add_resource_delete_policy(
            policy_statements,
            ["ec2:CreateTags"],
            [
                self.format_arn(
                    service="ec2",
                    account="",
                    resource="image",
                    region=region,
                    resource_name="*",
                )
                for region in self._get_distribution_regions()
            ],
        )
        self._add_resource_delete_policy(
            policy_statements,
            ["tag:TagResources"],
            ["*"],
        )
        self._add_resource_delete_policy(
            policy_statements,
            ["iam:DetachRolePolicy", "iam:DeleteRole", "iam:DeleteRolePolicy"],
            [
                self.format_arn(
                    service="iam",
                    resource="role",
                    region="",
                    resource_name="{0}/{1}".format(
                        IAM_ROLE_PATH.strip("/"),
                        self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX + "Cleanup"),
                    ),
                )
            ],
        )
        self._add_resource_delete_policy(
            policy_statements,
            ["lambda:DeleteFunction", "lambda:RemovePermission"],
            [
                self.format_arn(
                    service="lambda",
                    resource="function",
                    sep=":",
                    resource_name=self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX),
                )
            ],
        )
        self._add_resource_delete_policy(
            policy_statements,
            ["logs:DeleteLogGroup"],
            [
                self.format_arn(
                    service="logs",
                    resource="log-group",
                    sep=":",
                    resource_name="/aws/lambda/{0}:*".format(
                        self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX)),
                )
            ],
        )
        self._add_resource_delete_policy(
            policy_statements,
            ["iam:RemoveRoleFromInstanceProfile"],
            [
                self.format_arn(
                    service="iam",
                    resource="instance-profile",
                    region="",
                    resource_name="{0}/{1}".format(
                        IAM_ROLE_PATH.strip("/"),
                        self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX),
                    ),
                )
            ],
        )
        self._add_resource_delete_policy(
            policy_statements,
            ["iam:DetachRolePolicy", "iam:DeleteRolePolicy"],
            [
                self.format_arn(
                    service="iam",
                    resource="role",
                    region="",
                    resource_name="{0}/{1}".format(
                        IAM_ROLE_PATH.strip("/"),
                        self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX),
                    ),
                )
            ],
        )
        self._add_resource_delete_policy(
            policy_statements,
            ["SNS:GetTopicAttributes", "SNS:DeleteTopic", "SNS:Unsubscribe"],
            [
                self.format_arn(
                    service="sns",
                    resource=self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX),
                )
            ],
        )

        policy_document = iam.PolicyDocument(statements=policy_statements)
        managed_lambda_policy = [
            Fn.sub("arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"),
        ]

        # LambdaCleanupExecutionRole
        lambda_cleanup_execution_role = iam.CfnRole(
            self,
            "DeleteStackFunctionExecutionRole",
            managed_policy_arns=managed_lambda_policy,
            assume_role_policy_document=get_assume_role_policy_document("lambda.amazonaws.com"),
            path=IAM_ROLE_PATH,
            policies=[
                iam.CfnRole.PolicyProperty(
                    policy_document=policy_document,
                    policy_name="LambdaCleanupPolicy",
                ),
            ],
            tags=build_tags,
            role_name=self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX + "Cleanup"),
        )
        execution_role = lambda_cleanup_execution_role.attr_arn

    # LambdaCleanupEnv
    lambda_env = awslambda.CfnFunction.EnvironmentProperty(
        variables={"IMAGE_STACK_ARN": self.stack_id})

    # LambdaCWLogGroup
    lambda_log = logs.CfnLogGroup(
        self,
        "DeleteStackFunctionLog",
        log_group_name="/aws/lambda/{0}".format(
            self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX)),
    )

    # LambdaCleanupFunction
    lambda_cleanup = awslambda.CfnFunction(
        self,
        "DeleteStackFunction",
        function_name=self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX),
        code=awslambda.CfnFunction.CodeProperty(
            s3_bucket=self.config.custom_s3_bucket
            or S3Bucket.get_bucket_name(AWSApi.instance().sts.get_account_id(), get_region()),
            s3_key=self.bucket.get_object_key(S3FileType.CUSTOM_RESOURCES, "artifacts.zip"),
        ),
        handler="delete_image_stack.handler",
        memory_size=128,
        role=execution_role,
        runtime="python3.8",
        timeout=900,
        environment=lambda_env,
        tags=build_tags,
    )
    permission = awslambda.CfnPermission(
        self,
        "DeleteStackFunctionPermission",
        action="lambda:InvokeFunction",
        principal="sns.amazonaws.com",
        function_name=lambda_cleanup.attr_arn,
        source_arn=Fn.ref("BuildNotificationTopic"),
    )
    lambda_cleanup.add_depends_on(lambda_log)

    return lambda_cleanup, permission, lambda_cleanup_execution_role, lambda_log
def _add_imagebuilder_components(self, build_tags, lambda_cleanup_policy_statements):
    imagebuilder_resources_dir = os.path.join(
        imagebuilder_utils.get_resources_directory(), "imagebuilder")

    # ImageBuilderComponents
    components = []
    components_resources = []

    if self.config.build and self.config.build.update_os_packages and self.config.build.update_os_packages.enabled:
        update_os_component_resource = imagebuilder.CfnComponent(
            self,
            "UpdateOSComponent",
            name=self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX + "-UpdateOS"),
            version=utils.get_installed_version(base_version_only=True),
            tags=build_tags,
            description="Update OS and Reboot",
            platform="Linux",
            data=Fn.sub(_load_yaml(imagebuilder_resources_dir, "update_and_reboot.yaml")),
        )
        components.append(
            imagebuilder.CfnImageRecipe.ComponentConfigurationProperty(
                component_arn=Fn.ref("UpdateOSComponent")))
        components_resources.append(update_os_component_resource)

        if not self.custom_cleanup_lambda_role:
            self._add_resource_delete_policy(
                lambda_cleanup_policy_statements,
                ["imagebuilder:DeleteComponent"],
                [
                    self.format_arn(
                        service="imagebuilder",
                        resource="component",
                        resource_name="{0}/*".format(
                            self._build_resource_name(
                                IMAGEBUILDER_RESOURCE_NAME_PREFIX + "-UpdateOS", to_lower=True)),
                    )
                ],
            )

    disable_pcluster_component = (
        self.config.dev_settings.disable_pcluster_component
        if self.config.dev_settings and self.config.dev_settings.disable_pcluster_component
        else False
    )
    if not disable_pcluster_component:
        parallelcluster_component_resource = imagebuilder.CfnComponent(
            self,
            "ParallelClusterComponent",
            name=self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX),
            version=utils.get_installed_version(base_version_only=True),
            tags=build_tags,
            description="Install ParallelCluster software stack",
            platform="Linux",
            data=Fn.sub(_load_yaml(imagebuilder_resources_dir, "parallelcluster.yaml")),
        )
        components.append(
            imagebuilder.CfnImageRecipe.ComponentConfigurationProperty(
                component_arn=Fn.ref("ParallelClusterComponent")))
        components_resources.append(parallelcluster_component_resource)

        if not self.custom_cleanup_lambda_role:
            self._add_resource_delete_policy(
                lambda_cleanup_policy_statements,
                ["imagebuilder:DeleteComponent"],
                [
                    self.format_arn(
                        service="imagebuilder",
                        resource="component",
                        resource_name="{0}/*".format(
                            self._build_resource_name(
                                IMAGEBUILDER_RESOURCE_NAME_PREFIX, to_lower=True)),
                    )
                ],
            )

    tag_component_resource = imagebuilder.CfnComponent(
        self,
        "ParallelClusterTagComponent",
        name=self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX + "-Tag"),
        version=utils.get_installed_version(base_version_only=True),
        tags=build_tags,
        description="Tag ParallelCluster AMI",
        platform="Linux",
        data=_load_yaml(imagebuilder_resources_dir, "parallelcluster_tag.yaml"),
    )
    components.append(
        imagebuilder.CfnImageRecipe.ComponentConfigurationProperty(
            component_arn=Fn.ref("ParallelClusterTagComponent")))
    components_resources.append(tag_component_resource)

    if not self.custom_cleanup_lambda_role:
        self._add_resource_delete_policy(
            lambda_cleanup_policy_statements,
            ["imagebuilder:DeleteComponent"],
            [
                self.format_arn(
                    service="imagebuilder",
                    resource="component",
                    resource_name="{0}/*".format(
                        self._build_resource_name(
                            IMAGEBUILDER_RESOURCE_NAME_PREFIX + "-Tag", to_lower=True)),
                )
            ],
        )

    if self.config.build.components:
        self._add_custom_components(
            components, lambda_cleanup_policy_statements, components_resources)

    disable_validate_and_test_component = (
        self.config.dev_settings.disable_validate_and_test
        if self.config.dev_settings and self.config.dev_settings.disable_validate_and_test
        else False
    )
    if not disable_pcluster_component and not disable_validate_and_test_component:
        validate_component_resource = imagebuilder.CfnComponent(
            self,
            id="ParallelClusterValidateComponent",
            name=self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX + "-Validate"),
            version=utils.get_installed_version(base_version_only=True),
            tags=build_tags,
            description="Validate ParallelCluster AMI",
            platform="Linux",
            data=_load_yaml(imagebuilder_resources_dir, "parallelcluster_validate.yaml"),
        )
        components.append(
            imagebuilder.CfnImageRecipe.ComponentConfigurationProperty(
                component_arn=Fn.ref("ParallelClusterValidateComponent")))
        components_resources.append(validate_component_resource)

        if not self.custom_cleanup_lambda_role:
            self._add_resource_delete_policy(
                lambda_cleanup_policy_statements,
                ["imagebuilder:DeleteComponent"],
                [
                    self.format_arn(
                        service="imagebuilder",
                        resource="component",
                        resource_name="{0}/*".format(
                            self._build_resource_name(
                                IMAGEBUILDER_RESOURCE_NAME_PREFIX + "-Validate", to_lower=True)),
                    )
                ],
            )

        test_component_resource = imagebuilder.CfnComponent(
            self,
            id="ParallelClusterTestComponent",
            name=self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX + "-Test"),
            version=utils.get_installed_version(base_version_only=True),
            tags=build_tags,
            description="Test ParallelCluster AMI",
            platform="Linux",
            data=_load_yaml(imagebuilder_resources_dir, "parallelcluster_test.yaml"),
        )
        components.append(
            imagebuilder.CfnImageRecipe.ComponentConfigurationProperty(
                component_arn=Fn.ref("ParallelClusterTestComponent")))
        components_resources.append(test_component_resource)

        if not self.custom_cleanup_lambda_role:
            self._add_resource_delete_policy(
                lambda_cleanup_policy_statements,
                ["imagebuilder:DeleteComponent"],
                [
                    self.format_arn(
                        service="imagebuilder",
                        resource="component",
                        resource_name="{0}/*".format(
                            self._build_resource_name(
                                IMAGEBUILDER_RESOURCE_NAME_PREFIX + "-Test", to_lower=True)),
                    )
                ],
            )

    return components, components_resources
def __init__(self, scope: Construct, id: str, vpc: _ec2.IVpc, codebucket: IBucket,
             s3_deploy, metrics) -> None:
    super().__init__(scope, id)

    self._metrics_mapping = CfnMapping(
        self, 'AnonymousData', mapping={'SendAnonymousData': {'Data': 'Yes'}})
    self._metrics_condition = CfnCondition(
        self,
        'AnonymousDatatoAWS',
        expression=Fn.condition_equals(
            self._metrics_mapping.find_in_map('SendAnonymousData', 'Data'), 'Yes'))

    self._helper_func = _lambda.SingletonFunction(
        self,
        'SolutionHelper',
        uuid='75248a81-9138-468c-9ba1-bca6c7137599',
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler='lambda_function.handler',
        description='This function generates UUID for each deployment and sends '
                    'anonymous data to the AWS Solutions team',
        code=_lambda.Code.from_bucket(bucket=codebucket, key='app_code/solution_helper.zip'),
        vpc=vpc,
        timeout=Duration.seconds(30))
    self._helper_func.add_dependency(s3_deploy)

    self._lambda_provider = _custom_resources.Provider(
        self, 'LambdaProvider', on_event_handler=self._helper_func, vpc=vpc)

    self._uuid = CustomResource(
        self,
        'UUIDCustomResource',
        service_token=self._lambda_provider.service_token,
        properties={"Resource": "UUID"},
        resource_type="Custom::CreateUUID",
        removal_policy=RemovalPolicy.DESTROY)

    self._send_data = CustomResource(
        self,
        'SendDataCustomResource',
        service_token=self._lambda_provider.service_token,
        properties={
            "Resource": "AnonymousMetric",
            "UUID": self._uuid.get_att_string("UUID"),
            "Solution": metrics["Solution"],
            "Data": metrics,
        },
        resource_type='Custom::AnonymousData',
        removal_policy=RemovalPolicy.DESTROY)
    self._send_data.node.add_dependency(self._uuid)

    Aspects.of(self._helper_func).add(Condition(self._metrics_condition))
    Aspects.of(self._uuid).add(Condition(self._metrics_condition))
    Aspects.of(self._send_data).add(Condition(self._metrics_condition))
def _get_compute_env_prefix(self):
    return Fn.select(1, Fn.split("compute-environment/", self._compute_env.ref))
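# The compute environment's ref resolves to an ARN such as (example value)
#   arn:aws:batch:us-east-1:123456789012:compute-environment/MyEnv
# so Fn.select(1, ...) picks everything after "compute-environment/", i.e. the
# environment name. Plain-Python equivalent of the deploy-time expression:
def _compute_env_name_local(compute_env_arn: str) -> str:
    return compute_env_arn.split("compute-environment/")[1]  # e.g. "MyEnv"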
def __init__(
    self,
    scope: Construct,
    construct_id: str,
    *,
    deploy_env: str,
    processing_assets_table: aws_dynamodb.Table,
):
    # pylint: disable=too-many-locals
    super().__init__(scope, construct_id)

    if deploy_env == "prod":
        instance_types = [
            aws_ec2.InstanceType("c5.xlarge"),
            aws_ec2.InstanceType("c5.2xlarge"),
            aws_ec2.InstanceType("c5.4xlarge"),
            aws_ec2.InstanceType("c5.9xlarge"),
        ]
    else:
        instance_types = [
            aws_ec2.InstanceType("m5.large"),
            aws_ec2.InstanceType("m5.xlarge"),
        ]

    ec2_policy = aws_iam.ManagedPolicy.from_aws_managed_policy_name(
        "service-role/AmazonEC2ContainerServiceforEC2Role")

    batch_instance_role = aws_iam.Role(
        self,
        "batch-instance-role",
        assumed_by=aws_iam.ServicePrincipal("ec2.amazonaws.com"),  # type: ignore[arg-type]
        managed_policies=[ec2_policy],
    )
    processing_assets_table.grant_read_write_data(batch_instance_role)  # type: ignore[arg-type]

    batch_instance_profile = aws_iam.CfnInstanceProfile(
        self,
        "batch-instance-profile",
        roles=[batch_instance_role.role_name],
    )

    batch_launch_template_data = textwrap.dedent(
        """
        MIME-Version: 1.0
        Content-Type: multipart/mixed; boundary="==MYBOUNDARY=="

        --==MYBOUNDARY==
        Content-Type: text/x-shellscript; charset="us-ascii"

        #!/bin/bash
        echo ECS_IMAGE_PULL_BEHAVIOR=prefer-cached >> /etc/ecs/ecs.config

        --==MYBOUNDARY==--
        """
    )
    launch_template_data = aws_ec2.CfnLaunchTemplate.LaunchTemplateDataProperty(
        user_data=Fn.base64(batch_launch_template_data.strip()))
    cloudformation_launch_template = aws_ec2.CfnLaunchTemplate(
        self,
        "batch-launch-template",
        launch_template_name=f"{deploy_env}-datalake-batch-launch-template",
        launch_template_data=launch_template_data,
    )
    assert cloudformation_launch_template.launch_template_name is not None
    launch_template = aws_batch.LaunchTemplateSpecification(
        launch_template_name=cloudformation_launch_template.launch_template_name)

    # Use the existing VPC in the LINZ AWS account. A VPC with these tags must
    # already exist in the account; this project does not deploy one.
    vpc = aws_ec2.Vpc.from_lookup(
        self,
        "datalake-vpc",
        tags={
            APPLICATION_NAME_TAG_NAME: APPLICATION_NAME,
            "ApplicationLayer": "networking",
        },
    )

    compute_resources = aws_batch.ComputeResources(
        vpc=vpc,
        minv_cpus=0,
        desiredv_cpus=0,
        maxv_cpus=1000,
        instance_types=instance_types,
        instance_role=batch_instance_profile.instance_profile_name,
        allocation_strategy=aws_batch.AllocationStrategy.BEST_FIT_PROGRESSIVE,
        launch_template=launch_template,
    )
    batch_service_policy = aws_iam.ManagedPolicy.from_aws_managed_policy_name(
        "service-role/AWSBatchServiceRole")
    service_role = aws_iam.Role(
        self,
        "batch-service-role",
        assumed_by=aws_iam.ServicePrincipal("batch.amazonaws.com"),  # type: ignore[arg-type]
        managed_policies=[batch_service_policy],
    )
    compute_environment = aws_batch.ComputeEnvironment(
        self,
        "compute-environment",
        compute_resources=compute_resources,
        service_role=service_role,  # type: ignore[arg-type]
    )

    self.job_queue = aws_batch.JobQueue(
        scope,
        f"{construct_id}-job-queue",
        compute_environments=[
            aws_batch.JobQueueComputeEnvironment(
                compute_environment=compute_environment, order=10),  # type: ignore[arg-type]
        ],
        priority=10,
    )
def __init__(self, app: App, id: str, **kwargs) -> None:
    super().__init__(app, id, **kwargs)
    self.template_options.description = (
        "(SO0123) Improving Forecast Accuracy with Machine Learning %%VERSION%% - This solution "
        "provides a mechanism to automate Amazon Forecast predictor and forecast generation and "
        "visualize it via an Amazon SageMaker Jupyter Notebook"
    )

    # set up the template parameters
    email = CfnParameter(
        self,
        id="Email",
        type="String",
        description="Email to notify with forecast results",
        default="",
        max_length=50,
        allowed_pattern=r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$|^$)",
        constraint_description="Must be a valid email address or blank",
    )
    lambda_log_level = CfnParameter(
        self,
        id="LambdaLogLevel",
        type="String",
        description="Change the verbosity of the logs output to CloudWatch",
        default="WARNING",
        allowed_values=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
    )
    notebook_deploy = CfnParameter(
        self,
        id="NotebookDeploy",
        type="String",
        description="Deploy an Amazon SageMaker Jupyter Notebook instance",
        default="No",
        allowed_values=["Yes", "No"],
    )
    notebook_volume_size = CfnParameter(
        self,
        id="NotebookVolumeSize",
        type="Number",
        description="Enter the size of the notebook instance EBS volume in GB",
        default=10,
        min_value=5,
        max_value=16384,
        constraint_description="Must be an integer between 5 (GB) and 16384 (16 TB)",
    )
    notebook_instance_type = CfnParameter(
        self,
        id="NotebookInstanceType",
        type="String",
        description="Enter the type of the notebook instance",
        default="ml.t2.medium",
        allowed_values=[
            "ml.t2.medium",
            "ml.t3.medium",
            "ml.r5.large",
            "ml.c5.large",
        ],
    )
    quicksight_analysis_owner = CfnParameter(
        self,
        id="QuickSightAnalysisOwner",
        description="With QuickSight Enterprise enabled, provide a QuickSight ADMIN user ARN "
        "to automatically create QuickSight analyses",
        default="",
        allowed_pattern="(^arn:.*:quicksight:.*:.*:user.*$|^$)",
    )

    # set up the metadata / cloudformation interface
    template_options = TemplateOptions()
    template_options.add_parameter_group(
        label="Improving Forecast Accuracy with Machine Learning Configuration",
        parameters=[email],
    )
    template_options.add_parameter_group(
        label="Visualization Options",
        parameters=[
            quicksight_analysis_owner,
            notebook_deploy,
            notebook_instance_type,
            notebook_volume_size,
        ],
    )
    template_options.add_parameter_group(
        label="Deployment Configuration", parameters=[lambda_log_level])
    template_options.add_parameter_label(email, "Email")
    template_options.add_parameter_label(lambda_log_level, "CloudWatch Log Level")
    template_options.add_parameter_label(notebook_deploy, "Deploy Jupyter Notebook")
    template_options.add_parameter_label(notebook_volume_size, "Jupyter Notebook volume size")
    template_options.add_parameter_label(notebook_instance_type, "Jupyter Notebook instance type")
    template_options.add_parameter_label(quicksight_analysis_owner, "Deploy QuickSight Dashboards")
    self.template_options.metadata = template_options.metadata

    solution_mapping = CfnMapping(
        self,
        "Solution",
        mapping={
            "Data": {
                "ID": "SO0123",
                "Version": "%%VERSION%%",
                "SendAnonymousUsageData": "Yes",
            }
        },
    )
    source_mapping = CfnMapping(
        self,
        "SourceCode",
        mapping={
            "General": {
                "S3Bucket": "%%BUCKET_NAME%%",
                "KeyPrefix": "%%SOLUTION_NAME%%/%%VERSION%%",
                "QuickSightSourceTemplateArn": "%%QUICKSIGHT_SOURCE%%",
            }
        },
    )

    # conditions
    create_notebook = CfnCondition(
        self,
        "CreateNotebook",
        expression=Fn.condition_equals(notebook_deploy, "Yes"),
    )
    email_provided = CfnCondition(
        self,
        "EmailProvided",
        expression=Fn.condition_not(Fn.condition_equals(email, "")),
    )
    send_anonymous_usage_data = CfnCondition(
        self,
        "SendAnonymousUsageData",
        expression=Fn.condition_equals(
            Fn.find_in_map("Solution", "Data", "SendAnonymousUsageData"), "Yes"),
    )
    create_analysis = CfnCondition(
        self,
        "CreateAnalysis",
        expression=Fn.condition_not(
            Fn.condition_equals(quicksight_analysis_owner, "")),
    )

    # Step function and state machine
    fns = LambdaFunctions(self, "Functions", log_level=lambda_log_level)

    # SNS
    notifications = Notifications(
        self,
        "NotificationConfiguration",
        lambda_function=fns.functions["SNS"],
        email=email,
        email_provided=email_provided,
    )

    # Custom Resources
    unique_name = CfnResource(
        self,
        "UniqueName",
        type="Custom::UniqueName",
        properties={"ServiceToken": fns.functions["CfnResourceUniqueName"].function_arn},
    )
    unique_name.override_logical_id("UniqueName")

    data_bucket_name_resource = CfnResource(
        self,
        "DataBucketName",
        type="Custom::BucketName",
        properties={
            "ServiceToken": fns.functions["CfnResourceBucketName"].function_arn,
            "BucketPurpose": "data-bucket",
            "StackName": Aws.STACK_NAME,
            "Id": unique_name.get_att("Id"),
        },
    )
    data_bucket_name_resource.override_logical_id("DataBucketName")

    # Buckets
    access_logs_bucket = self.secure_bucket(
        "AccessLogsBucket",
        suppressions=[
            CfnNagSuppression(
                "W35",
                "This bucket is used as the logging destination for forecast datasets and exports",
            )
        ],
        access_control=BucketAccessControl.LOG_DELIVERY_WRITE,
    )
    athena_bucket = self.secure_bucket(
        "AthenaBucket",
        server_access_logs_bucket=access_logs_bucket,
        server_access_logs_prefix="athena-bucket-access-logs/",
    )
    data_bucket = self.secure_bucket(
        "ForecastBucket",
        lifecycle_rules=[
            LifecycleRule(
                abort_incomplete_multipart_upload_after=Duration.days(3),
                enabled=True,
            ),
            LifecycleRule(expiration=Duration.days(1), prefix="raw/", enabled=True),
        ],
        bucket_name=data_bucket_name_resource.get_att("Name").to_string(),
        server_access_logs_bucket=access_logs_bucket,
        server_access_logs_prefix="forecast-bucket-access-logs/",
    )
    data_bucket.node.default_child.add_property_override(
        "NotificationConfiguration",
        {
            "LambdaConfigurations": [
                {
                    "Function": fns.functions["S3NotificationLambda"].function_arn,
                    "Event": "s3:ObjectCreated:*",
                    "Filter": {
                        "S3Key": {
                            "Rules": [
                                {"Name": "prefix", "Value": "train/"},
                                {"Name": "suffix", "Value": ".csv"},
                            ]
                        }
                    },
                }
            ]
        },
    )

    # Glue and Athena
    glue = Glue(self, "GlueResources", unique_name)
    athena = Athena(self, "AthenaResources", athena_bucket=athena_bucket)

    # Configure permissions for functions
    fns.set_s3_notification_permissions(data_bucket_name_resource)
    fns.set_forecast_s3_access_permissions(
        name="DatasetImport",
        function=fns.functions["CreateDatasetImportJob"],
        data_bucket_name_resource=data_bucket_name_resource,
    )
    fns.set_forecast_s3_access_permissions(
        name="ForecastExport",
        function=fns.functions["CreateForecast"],
        data_bucket_name_resource=data_bucket_name_resource,
    )
    fns.set_forecast_etl_permissions(
        function=fns.functions["PrepareForecastExport"],
        database=glue.database,
        workgroup=athena.workgroup,
        quicksight_principal=quicksight_analysis_owner,
        quicksight_source=source_mapping,
        athena_bucket=athena_bucket,
        data_bucket_name_resource=data_bucket_name_resource,
    )
    fns.set_forecast_permissions(
        "CreateDatasetGroup", data_bucket_name_resource=data_bucket_name_resource)
    fns.set_forecast_permissions(
        "CreateDatasetImportJob", data_bucket_name_resource=data_bucket_name_resource)
    fns.set_forecast_permissions(
        "CreateForecast", data_bucket_name_resource=data_bucket_name_resource)
    fns.set_forecast_permissions(
        "CreatePredictor", data_bucket_name_resource=data_bucket_name_resource)
    fns.set_forecast_permissions(
        "PrepareForecastExport", data_bucket_name_resource=data_bucket_name_resource)

    # notebook (conditional on 'create_notebook')
    notebook = Notebook(
        self,
        "Notebook",
        buckets=[data_bucket],
        instance_type=notebook_instance_type.value_as_string,
        instance_volume_size=notebook_volume_size.value_as_number,
        notebook_path=Path(__file__).parent.parent.parent.joinpath(
            "notebook", "samples", "notebooks"),
        notebook_destination_bucket=data_bucket,
        notebook_destination_prefix="notebooks",
    )
    Aspects.of(notebook).add(ConditionalResources(create_notebook))

    # solution metrics (conditional on 'send_anonymous_usage_data')
    metrics = Metrics(
        self,
        "SolutionMetrics",
        metrics_function=fns.functions["CfnResourceSolutionMetrics"],
        metrics={
            "Solution": solution_mapping.find_in_map("Data", "ID"),
            "Version": solution_mapping.find_in_map("Data", "Version"),
            "Region": Aws.REGION,
            "NotebookDeployed": Fn.condition_if(create_notebook.node.id, "Yes", "No"),
            "NotebookType": Fn.condition_if(
                create_notebook.node.id,
                notebook_instance_type.value_as_string,
                Aws.NO_VALUE,
            ),
            "QuickSightDeployed": Fn.condition_if(create_analysis.node.id, "Yes", "No"),
        },
    )
    Aspects.of(metrics).add(ConditionalResources(send_anonymous_usage_data))

    # outputs
    CfnOutput(self, "ForecastBucketName", value=data_bucket.bucket_name)
    CfnOutput(self, "AthenaBucketName", value=athena_bucket.bucket_name)
    CfnOutput(self, "StepFunctionsName", value=fns.state_machine.state_machine_name)
def __init__(self, scope: core.Construct, id: str, application_prefix: str,
             suffix: str, kda_role: Role, **kwargs):
    super().__init__(scope, id, **kwargs)
    stack = Stack.of(self)
    region = stack.region

    # Create Cognito User Pool
    self.__user_pool = CfnUserPool(
        scope=self,
        id='UserPool',
        admin_create_user_config={'allowAdminCreateUserOnly': True},
        policies={'passwordPolicy': {'minimumLength': 8}},
        username_attributes=['email'],
        auto_verified_attributes=['email'],
        user_pool_name=application_prefix + '_user_pool')

    # Create a Cognito User Pool Domain using the newly created Cognito User Pool
    CfnUserPoolDomain(
        scope=self,
        id='CognitoDomain',
        domain=application_prefix + '-' + suffix,
        user_pool_id=self.user_pool.ref)

    # Create Cognito Identity Pool
    self.__id_pool = CfnIdentityPool(
        scope=self,
        id='IdentityPool',
        allow_unauthenticated_identities=False,
        cognito_identity_providers=[],
        identity_pool_name=application_prefix + '_identity_pool')

    trust_relationship = FederatedPrincipal(
        federated='cognito-identity.amazonaws.com',
        conditions={
            'StringEquals': {
                'cognito-identity.amazonaws.com:aud': self.id_pool.ref
            },
            'ForAnyValue:StringLike': {
                'cognito-identity.amazonaws.com:amr': 'authenticated'
            }
        },
        assume_role_action='sts:AssumeRoleWithWebIdentity')

    # IAM role for the master user
    master_auth_role = Role(scope=self, id='MasterAuthRole', assumed_by=trust_relationship)
    # Role for authenticated users
    limited_auth_role = Role(scope=self, id='LimitedAuthRole', assumed_by=trust_relationship)

    # Attach the role to the Identity Pool
    CfnIdentityPoolRoleAttachment(
        scope=self,
        id='userPoolRoleAttachment',
        identity_pool_id=self.id_pool.ref,
        roles={'authenticated': limited_auth_role.role_arn})

    # Create master-user-group
    CfnUserPoolGroup(
        scope=self,
        id='AdminsGroup',
        user_pool_id=self.user_pool.ref,
        group_name='master-user-group',
        role_arn=master_auth_role.role_arn)
    # Create limited-user-group
    CfnUserPoolGroup(
        scope=self,
        id='UsersGroup',
        user_pool_id=self.user_pool.ref,
        group_name='limited-user-group',
        role_arn=limited_auth_role.role_arn)

    # Role for the Elasticsearch service to access Cognito
    es_role = Role(
        scope=self,
        id='EsRole',
        assumed_by=ServicePrincipal(service='es.amazonaws.com'),
        managed_policies=[
            ManagedPolicy.from_aws_managed_policy_name('AmazonESCognitoAccess')
        ])

    # Use the following command line to generate the python dependencies layer content:
    # pip3 install -t lambda-layer/python/lib/python3.8/site-packages -r lambda/requirements.txt
    # Build the lambda layer assets
    subprocess.call([
        'pip', 'install', '-t',
        'streaming/streaming_cdk/lambda-layer/python/lib/python3.8/site-packages',
        '-r', 'streaming/streaming_cdk/bootstrap-lambda/requirements.txt',
        '--upgrade'
    ])
    requirements_layer = _lambda.LayerVersion(
        scope=self,
        id='PythonRequirementsTemplate',
        code=_lambda.Code.from_asset('streaming/streaming_cdk/lambda-layer'),
        compatible_runtimes=[_lambda.Runtime.PYTHON_3_8])

    # This lambda function will bootstrap the Elasticsearch cluster
    bootstrap_function_name = 'AESBootstrap'
    register_template_lambda = _lambda.Function(
        scope=self,
        id='RegisterTemplate',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.from_asset('streaming/streaming_cdk/bootstrap-lambda'),
        handler='es-bootstrap.lambda_handler',
        environment={
            'REGION': region,
            'KDA_ROLE_ARN': kda_role.role_arn,
            'MASTER_ROLE_ARN': master_auth_role.role_arn
        },
        layers=[requirements_layer],
        timeout=Duration.minutes(15),
        function_name=bootstrap_function_name)

    lambda_role = register_template_lambda.role
    lambda_role.add_to_policy(
        PolicyStatement(
            actions=['logs:CreateLogGroup'],
            resources=[stack.format_arn(service='logs', resource='*')]))
    lambda_role.add_to_policy(
        PolicyStatement(
            actions=['logs:CreateLogStream', 'logs:PutLogEvents'],
            resources=[
                stack.format_arn(
                    service='logs',
                    resource='log_group',
                    resource_name='/aws/lambda/' + bootstrap_function_name + ':*')
            ]))

    # Let the lambda assume the master role so that actions can be executed on the cluster
    # https://aws.amazon.com/premiumsupport/knowledge-center/lambda-function-assume-iam-role/
    lambda_role.add_to_policy(
        PolicyStatement(actions=['sts:AssumeRole'], resources=[master_auth_role.role_arn]))
    master_auth_role.assume_role_policy.add_statements(
        PolicyStatement(actions=['sts:AssumeRole'], principals=[lambda_role]))

    # List all the roles that are allowed to access the Elasticsearch cluster.
    roles = [
        ArnPrincipal(limited_auth_role.role_arn),
        ArnPrincipal(master_auth_role.role_arn),
        ArnPrincipal(kda_role.role_arn)
    ]  # The users
    if register_template_lambda and register_template_lambda.role:
        roles.append(ArnPrincipal(lambda_role.role_arn))  # The lambda used to bootstrap

    # Create kms key
    kms_key = Key(
        scope=self,
        id='kms-es',
        alias='custom/es',
        description='KMS key for Elasticsearch domain',
        enable_key_rotation=True)

    # AES Log Groups
    es_app_log_group = logs.LogGroup(
        scope=self,
        id='EsAppLogGroup',
        retention=logs.RetentionDays.ONE_WEEK,
        removal_policy=RemovalPolicy.RETAIN)

    # Create the Elasticsearch domain
    es_domain_arn = stack.format_arn(
        service='es', resource='domain', resource_name=application_prefix + '/*')
    es_access_policy = PolicyDocument(
        statements=[
            PolicyStatement(
                principals=roles,
                actions=[
                    'es:ESHttpGet', 'es:ESHttpPut', 'es:ESHttpPost', 'es:ESHttpDelete'
                ],
                resources=[es_domain_arn])
        ])
    self.__es_domain = es.CfnDomain(
        scope=self,
        id='searchDomain',
        elasticsearch_cluster_config={
            'instanceType': 'r5.large.elasticsearch',
            'instanceCount': 2,
            'dedicatedMasterEnabled': True,
            'dedicatedMasterCount': 3,
            'dedicatedMasterType': 'r5.large.elasticsearch',
            'zoneAwarenessEnabled': True,
            'zoneAwarenessConfig': {'AvailabilityZoneCount': '2'},
        },
        encryption_at_rest_options={'enabled': True, 'kmsKeyId': kms_key.key_id},
        node_to_node_encryption_options={'enabled': True},
        ebs_options={'volumeSize': 10, 'ebsEnabled': True},
        elasticsearch_version='7.9',
        domain_name=application_prefix,
        access_policies=es_access_policy,
        cognito_options={
            'enabled': True,
            'identityPoolId': self.id_pool.ref,
            'roleArn': es_role.role_arn,
            'userPoolId': self.user_pool.ref
        },
        advanced_security_options={
            'enabled': True,
            'internalUserDatabaseEnabled': False,
            'masterUserOptions': {'masterUserArn': master_auth_role.role_arn}
        },
        domain_endpoint_options={
            'enforceHttps': True,
            'tlsSecurityPolicy': 'Policy-Min-TLS-1-2-2019-07'
        },
        # log_publishing_options={
        #     'ES_APPLICATION_LOGS': {
        #         'enabled': True,
        #         'cloud_watch_logs_log_group_arn': es_app_log_group.log_group_arn
        #     },
        #     'AUDIT_LOGS': {
        #         'enabled': True,
        #         'cloud_watch_logs_log_group_arn': ''
        #     },
        #     'SEARCH_SLOW_LOGS': {
        #         'enabled': True,
        #         'cloud_watch_logs_log_group_arn': ''
        #     },
        #     'INDEX_SLOW_LOGS': {
        #         'enabled': True,
        #         'cloud_watch_logs_log_group_arn': ''
        #     }
        # }
    )

    # UltraWarm support is not yet on the CloudFormation roadmap...
    # See https://github.com/aws-cloudformation/aws-cloudformation-coverage-roadmap/issues/283
    # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmEnabled', True)
    # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmCount', 2)
    # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmType', 'ultrawarm1.large.elasticsearch')

    # Deny all roles from the authentication provider - users must be added to groups.
    # This lambda function fixes the identity pool role mapping accordingly.
    cognito_function_name = 'CognitoFix'
    cognito_template_lambda = _lambda.Function(
        scope=self,
        id='CognitoFixLambda',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.from_asset('streaming/streaming_cdk/cognito-lambda'),
        handler='handler.handler',
        environment={
            # Use the enclosing stack's region (scope is a plain Construct)
            'REGION': region,
            'USER_POOL_ID': self.__user_pool.ref,
            'IDENTITY_POOL_ID': self.__id_pool.ref,
            'LIMITED_ROLE_ARN': limited_auth_role.role_arn
        },
        timeout=Duration.minutes(15),
        function_name=cognito_function_name)

    lambda_role = cognito_template_lambda.role
    lambda_role.add_to_policy(
        PolicyStatement(
            actions=['logs:CreateLogGroup'],
            resources=[stack.format_arn(service='logs', resource='*')]))
    lambda_role.add_to_policy(
        PolicyStatement(
            actions=['logs:CreateLogStream', 'logs:PutLogEvents'],
            resources=[
                stack.format_arn(
                    service='logs',
                    resource='log_group',
                    resource_name='/aws/lambda/' + cognito_function_name + ':*')
            ]))
    lambda_role.add_to_policy(
        PolicyStatement(
            actions=['cognito-idp:ListUserPoolClients'],
            resources=[self.user_pool.attr_arn]))
    lambda_role.add_to_policy(
        PolicyStatement(
            actions=['iam:PassRole'],
            resources=[limited_auth_role.role_arn]))

    cognito_id_res = Fn.join(':', [
        'arn:aws:cognito-identity', stack.region, stack.account,
        Fn.join('/', ['identitypool', self.__id_pool.ref])
    ])
    lambda_role.add_to_policy(
        PolicyStatement(
            actions=['cognito-identity:SetIdentityPoolRoles'],
            resources=[cognito_id_res]))

    # Get the Domain Endpoint and register it with the lambda as an environment variable
    register_template_lambda.add_environment('DOMAIN', self.__es_domain.attr_domain_endpoint)

    CfnOutput(
        scope=self,
        id='createUserUrl',
        description="Create a new user in the user pool here.",
        value="https://" + region + ".console.aws.amazon.com/cognito/users?region="
        + region + "#/pool/" + self.user_pool.ref + "/users")
    CfnOutput(
        scope=self,
        id='kibanaUrl',
        description="Access Kibana via this URL.",
        value="https://" + self.__es_domain.attr_domain_endpoint + "/_plugin/kibana/")

    bootstrap_lambda_provider = Provider(
        scope=self,
        id='BootstrapLambdaProvider',
        on_event_handler=register_template_lambda)
    CustomResource(
        scope=self,
        id='ExecuteRegisterTemplate',
        service_token=bootstrap_lambda_provider.service_token,
        properties={'Timeout': 900})

    cognito_lambda_provider = Provider(
        scope=self,
        id='CognitoFixLambdaProvider',
        on_event_handler=cognito_template_lambda)
    cognito_fix_resource = CustomResource(
        scope=self,
        id='ExecuteCognitoFix',
        service_token=cognito_lambda_provider.service_token)
    cognito_fix_resource.node.add_dependency(self.__es_domain)
def __init__(self, scope: core.Construct, id: str, prefix: str,
             source_bucket: s3.Bucket, dest_bucket: s3.Bucket, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Extract a short unique suffix from the stack ID
    suffix = Fn.select(4, Fn.split('-', Fn.select(2, Fn.split('/', self.stack_id))))

    # KMS key for Kinesis Data Streams
    self.__kms_key = Key(
        scope=self,
        id='kms-kinesis',
        alias='custom/kinesis',
        description='KMS key for Kinesis Data Streams',
        enable_key_rotation=True)

    # Create Kinesis streams
    self.__sale_stream = Stream(
        scope=self, id="saleStream", stream_name="ara-web-sale",
        encryption_key=self.__kms_key)
    self.__address_stream = Stream(
        scope=self, id="addressStream", stream_name="ara-web-customer-address",
        encryption_key=self.__kms_key)
    self.__customer_stream = Stream(
        scope=self, id="customerStream", stream_name="ara-web-customer",
        encryption_key=self.__kms_key)

    # Role for the KDA service
    kda_role = Role(
        scope=self,
        id='KinesisAnalyticsRole',
        assumed_by=ServicePrincipal(service='kinesisanalytics.amazonaws.com'))

    # Grant read on Kinesis streams
    self.__customer_stream.grant_read(kda_role)
    self.__address_stream.grant_read(kda_role)
    self.__sale_stream.grant_read(kda_role)

    # Grant read on source bucket (reference data)
    source_bucket.grant_read(kda_role)
    # Grant write on destination bucket
    dest_bucket.grant_write(kda_role)

    kda_role.add_to_policy(
        PolicyStatement(
            actions=['kinesis:ListShards'],
            resources=[
                self.__customer_stream.stream_arn,
                self.__address_stream.stream_arn,
                self.__sale_stream.stream_arn
            ]))

    # Create Elasticsearch domain
    # TODO: use VPC subnets
    es_domain = EsDomain(
        scope=self,
        id='EsDomain',
        application_prefix=prefix,
        suffix=suffix,
        kda_role=kda_role)

    # Create the KDA application after the Elasticsearch service
    kda_app = KdaApplication(
        scope=self,
        id='KdaApplication',
        es_domain=es_domain.es_domain,
        kda_role=kda_role,
        source_bucket=source_bucket,
        dest_bucket=dest_bucket)

    core.Tags.of(self).add('module-name', 'streaming')