def __init__(self, scope: core.Construct, id: str, project_arn: str, device_pool_name: str, manufacturer: str = 'Google', platform: str = 'ANDROID', os_version='10', max_devices=1, log_retention=None) -> None:
    """Create a Device Farm device pool through an SDK-call custom resource.

    Device Farm has no CloudFormation resource type, so the pool is managed
    with create/update/delete SDK calls. The resolved pool ARN is exposed
    as ``self.device_pool_arn``.
    """
    super().__init__(scope, id)
    self.device_pool_name = device_pool_name
    # The pool ARN is unknown before creation, so the SDK-call policy is broad.
    sdk_call_policy = AwsCustomResourcePolicy.from_sdk_calls(
        resources=AwsCustomResourcePolicy.ANY_RESOURCE)
    create_call = self.create_device_pool(project_arn, device_pool_name,
                                          platform, max_devices,
                                          os_version, manufacturer)
    update_call = self.update_device_pool(device_pool_name, platform,
                                          max_devices, os_version,
                                          manufacturer)
    self.custom_resource = AwsCustomResource(
        scope=self,
        id=f'{id}-CustomResource',
        policy=sdk_call_policy,
        log_retention=log_retention,
        on_create=create_call,
        on_update=update_call,
        on_delete=self.delete_device_pool(),
        resource_type='Custom::AWS-DeviceFarm-DevicePool')
    self.device_pool_arn = self.custom_resource.get_response_field_reference(
        'devicePool.arn')
def add_contact_api(stack: CDKMasterStack, project_name: str, domain: str, forwarding_email: str):
    """Wire a contact-form endpoint onto the stack.

    Creates the SES-sending Lambda, verifies the SES domain identity through
    a custom resource, publishes the verification TXT record in Route53, and
    registers a POST method at the "contact" API path.

    :param stack: Stack providing ``zone`` and ``add_api_method``.
    :param project_name: Used to build the sender display name.
    :param domain: Domain to verify with SES; sender is contact@<domain>.
    :param forwarding_email: Address the form submissions are forwarded to.
    """
    module_path = os.path.dirname(__file__)
    lambda_path = os.path.join(module_path, "lambda")
    api_path = "contact"
    base_lambda = aws_lambda.Function(
        stack, 'ContactFormLambda',
        handler='lambda_handler.handler',
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        environment={
            "TARGET_EMAIL": forwarding_email,
            "SENDER_EMAIL": f"contact@{domain}",
            # f-string around a lone expression was a no-op; call it directly.
            "SENDER_NAME": project_name.capitalize(),
            "SENDER": f"{project_name.capitalize()} Contact Form <contact@{domain}>"
        },
        # Code.asset() is deprecated; Code.from_asset() is the supported API.
        code=aws_lambda.Code.from_asset(lambda_path),
    )
    # The Lambda must be allowed to send mail through SES.
    base_lambda.add_to_role_policy(
        aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                resources=["*"],
                                actions=["ses:SendEmail", "ses:SendRawEmail"]))
    # Kick off SES domain verification; the returned token becomes the
    # physical resource id and is also published as a TXT record below.
    verify_domain_create_call = AwsSdkCall(
        service="SES",
        action="verifyDomainIdentity",
        parameters={"Domain": domain},
        physical_resource_id=PhysicalResourceId.from_response(
            "VerificationToken"))
    policy_statement = PolicyStatement(actions=["ses:VerifyDomainIdentity"],
                                       resources=["*"])
    verify_domain_identity = AwsCustomResource(
        stack, "VerifyDomainIdentity",
        on_create=verify_domain_create_call,
        policy=AwsCustomResourcePolicy.from_statements(
            statements=[policy_statement]))
    aws_route53.TxtRecord(
        stack, "SESVerificationRecord",
        zone=stack.zone,
        record_name=f"_amazonses.{domain}",
        values=[
            verify_domain_identity.get_response_field("VerificationToken")
        ])
    stack.add_api_method(api_path, "POST", base_lambda)
def update_parameters_for_auth_section(
    self,
    user_pool_client: aws_cognito.CfnUserPoolClient,
    user_pool_client_secret: custom_resources.AwsCustomResource,
    user_pool_domain: Optional[aws_cognito.CfnUserPoolDomain],
    tag: str,
):
    """
    Record the hosted-UI OAuth settings under the "Auth" section.

    This contains nearly identical info as the "HostedUI" section above,
    but is organized differently for the AWSMobileClient. Does nothing
    when no hosted-UI domain is configured.
    """
    if not user_pool_domain:
        # No hosted UI -> nothing to publish for this section.
        return
    web_domain = f"{user_pool_domain.domain}.auth.{self.region}.amazoncognito.com"
    oauth_entries = {
        f"awsconfiguration/Auth/{tag}/OAuth/WebDomain": web_domain,
        f"awsconfiguration/Auth/{tag}/OAuth/AppClientId": user_pool_client.ref,
        f"awsconfiguration/Auth/{tag}/OAuth/AppClientSecret":
            user_pool_client_secret.get_response_field(
                "UserPoolClient.ClientSecret"),
        f"awsconfiguration/Auth/{tag}/OAuth/SignInRedirectURI":
            self._secrets["hostedui.sign_in_redirect"],
        f"awsconfiguration/Auth/{tag}/OAuth/SignOutRedirectURI":
            self._secrets["hostedui.sign_out_redirect"],
        # Scopes are stored space-separated in secrets; saved as a list.
        f"awsconfiguration/Auth/{tag}/OAuth/Scopes":
            self._secrets["hostedui.scopes"].split(),
    }
    self._parameters_to_save.update(oauth_entries)
def __init__(self, scope: core.Construct, id: str, bucket_name: str, object_key: str, object_content: Any, log_retention=None) -> None:
    """Manage a single S3 object via an SDK-call custom resource.

    This code is for demo purposes: we could simply have passed the bucket
    ARN, but resolving the bucket by name also works for buckets external
    to this stack.
    """
    super().__init__(scope, id)
    external_bucket = s3.Bucket.from_bucket_name(
        scope=scope,
        id="CustomResourceDemoBucketExternal",
        bucket_name=bucket_name)
    create_call = self.get_on_create_update(bucket_name=bucket_name,
                                            object_key=object_key,
                                            object_content=object_content)
    # Updating an S3 object is really creating a new version of it.
    update_call = create_call
    delete_call = self.get_on_delete(bucket_name, object_key)
    # Permissions are scoped down to the single object being managed.
    object_policy = AwsCustomResourcePolicy.from_sdk_calls(
        resources=[f'{external_bucket.bucket_arn}/{object_key}'])
    provisioning_role = self.get_provisioning_lambda_role(construct_id=id)
    AwsCustomResource(scope=scope,
                      id=f'{id}-AWSCustomResource',
                      policy=object_policy,
                      log_retention=log_retention,
                      on_create=create_call,
                      on_update=update_call,
                      on_delete=delete_call,
                      resource_type='Custom::AWS-S3-Object',
                      role=provisioning_role)
def copy_from_assests_bucket_to_custom_bucket(self, construct_id, asset_bucket, file_name, s3_custom_bucket):
    """Copy a CDK asset object into a custom bucket with an AwsCustomResource.

    :param construct_id: Prefix used for the IAM role/policy construct ids.
    :param asset_bucket: Deployed asset exposing s3_bucket_name/s3_object_key.
    :param file_name: Destination key in the custom bucket.
    :param s3_custom_bucket: Target bucket construct.
    :return: The AwsCustomResource that performs the copy on create/update.
    """
    asset_bucket_object = s3.Bucket.from_bucket_name(
        self, "AssetBucketObject", asset_bucket.s3_bucket_name)
    # SDK-call policy covering objects in both source and destination buckets.
    custom_resource_policy = AwsCustomResourcePolicy.from_sdk_calls(
        resources=[
            f"{asset_bucket_object.bucket_arn}/*",
            f"{s3_custom_bucket.bucket_arn}/*"
        ])
    # Execution role for the custom-resource Lambda: basic execution plus
    # read/write access to the two buckets.
    custom_resource_lambda_role = _iam.Role(
        scope=self,
        id=f'{construct_id}-CustomResourceLambdaRole',
        assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com'),
        managed_policies=[
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AWSLambdaBasicExecutionRole"),
            _iam.ManagedPolicy(
                scope=self,
                id=f'{construct_id}-CustomResourceLambdaPolicy',
                managed_policy_name="AssetsBucketAccessPolicy",
                statements=[
                    _iam.PolicyStatement(
                        resources=[
                            f"{asset_bucket_object.bucket_arn}/*",
                            f"{s3_custom_bucket.bucket_arn}/*"
                        ],
                        actions=[
                            "s3:List*", "s3:PutObject", "s3:GetObject"
                        ])
                ])
        ])
    on_create = AwsSdkCall(
        action='copyObject',
        service='S3',
        # Was wrapped in a no-op f-string; the bucket name is already a str.
        physical_resource_id=PhysicalResourceId.of(
            asset_bucket.s3_bucket_name),
        parameters={
            "Bucket": s3_custom_bucket.bucket_name,
            "CopySource": asset_bucket.s3_bucket_name + '/' +
                          asset_bucket.s3_object_key,
            "Key": file_name
        })
    custom_resource_creation = AwsCustomResource(
        scope=self,
        id='CustomResourceSyncWithS3',
        policy=custom_resource_policy,
        log_retention=logs.RetentionDays.ONE_WEEK,
        on_create=on_create,
        on_update=on_create,
        role=custom_resource_lambda_role,
        timeout=cdk.Duration.seconds(300))
    return custom_resource_creation
def __init__(self, scope: Construct, id: str, bucket_name):
    """Define a custom resource whose SDK calls operate on ``bucket_name``."""
    super().__init__(scope, id)
    # Restrict the SDK-call policy to objects inside the target bucket.
    bucket_objects_arn = f'arn:aws:s3:::{bucket_name}/*'
    res = AwsCustomResource(
        scope=self,
        id='AWSCustomResource',
        policy=AwsCustomResourcePolicy.from_sdk_calls(
            resources=[bucket_objects_arn]),
        log_retention=logs.RetentionDays.INFINITE,
        on_create=self.create(bucket_name),
        on_delete=self.delete(bucket_name),
        resource_type='Custom::MyCustomResource'
    )
def get_resource(self):
    """
    Creates a custom resource to create commits to codecommit.

    :return: Custom resource to create commits to codecommit.
    """
    resource_id = self.__prefix + "CiCdLambdaCustomCommitResource"
    create_call = self.__on_create()
    update_call = self.__on_update()
    delete_call = self.__on_delete()
    return AwsCustomResource(self.__stack,
                             resource_id,
                             on_create=create_call,
                             on_update=update_call,
                             on_delete=delete_call,
                             role=self.__role())
def update_parameters_for_userpool(
    self,
    user_pool: aws_cognito.CfnUserPool,
    user_pool_client: aws_cognito.CfnUserPoolClient,
    user_pool_client_secret: custom_resources.AwsCustomResource,
    user_pool_domain: Optional[aws_cognito.CfnUserPoolDomain],
    tag: str,
    custom_endpoint: Optional[str] = None,
):
    """Publish Cognito user-pool settings into the saved-parameters map.

    Always writes the pool/app-client entries; adds an Endpoint entry when
    a custom endpoint is supplied and a HostedUI section when a user-pool
    domain exists.
    """
    pool_id = user_pool.ref
    app_client_id = user_pool_client.ref
    app_client_secret = user_pool_client_secret.get_response_field(
        "UserPoolClient.ClientSecret")
    base_entries = {
        f"awsconfiguration/CognitoUserPool/{tag}/PoolId": pool_id,
        f"awsconfiguration/CognitoUserPool/{tag}/AppClientId": app_client_id,
        f"awsconfiguration/CognitoUserPool/{tag}/AppClientSecret": app_client_secret,
        f"awsconfiguration/CognitoUserPool/{tag}/Region": self.region,
    }
    self._parameters_to_save.update(base_entries)
    if custom_endpoint:
        self._parameters_to_save[
            f"awsconfiguration/CognitoUserPool/{tag}/Endpoint"] = custom_endpoint
    if user_pool_domain:
        url = f"https://{user_pool_domain.domain}.auth.{self.region}.amazoncognito.com"
        # Scopes are stored space-separated in secrets; saved as a list.
        scopes = self._secrets["hostedui.scopes"].split()
        sign_in_uri = self._secrets["hostedui.sign_in_redirect"]
        sign_out_uri = self._secrets["hostedui.sign_out_redirect"]
        self._parameters_to_save.update({
            f"awsconfiguration/CognitoUserPool/{tag}/HostedUI/WebDomain": url,
            f"awsconfiguration/CognitoUserPool/{tag}/HostedUI/AppClientId": app_client_id,
            f"awsconfiguration/CognitoUserPool/{tag}/HostedUI/AppClientSecret": app_client_secret,  # noqa: E501
            f"awsconfiguration/CognitoUserPool/{tag}/HostedUI/SignInRedirectURI": sign_in_uri,  # noqa: E501
            f"awsconfiguration/CognitoUserPool/{tag}/HostedUI/SignOutRedirectURI": sign_out_uri,  # noqa: E501
            f"awsconfiguration/CognitoUserPool/{tag}/HostedUI/Scopes": scopes,
        })
def get_resource(self):
    """
    Creates a custom resource to manage a deployment group.

    :return: Custom resource to manage a deployment group.
    """
    resource_id = self.__prefix + "CustomFargateDeploymentGroupResource"
    create_call = self.__on_create()
    update_call = self.__on_update()
    delete_call = self.__on_delete()
    return AwsCustomResource(
        scope=self.__stack,
        id=resource_id,
        on_create=create_call,
        on_update=update_call,
        on_delete=delete_call,
        role=self.__custom_resource_role
    )
def __init__(self, scope: core.Construct, id: str, project_name: str, log_retention=None) -> None:
    """Create a Device Farm project through an SDK-call custom resource.

    Exposes ``project_arn`` (a response-field reference) and ``project_id``
    (the component after the 6th ':' of the ARN).
    """
    super().__init__(scope, id)
    self.project_name = project_name
    # The project ARN is unknown before creation, so the policy is broad.
    sdk_call_policy = AwsCustomResourcePolicy.from_sdk_calls(
        resources=AwsCustomResourcePolicy.ANY_RESOURCE)
    self.custom_resource = AwsCustomResource(
        scope=self,
        id=f'{id}-CustomResource',
        policy=sdk_call_policy,
        log_retention=log_retention,
        on_create=self.create_project(project_name),
        on_update=self.update_project(project_name),
        on_delete=self.delete_project(project_name),
        resource_type='Custom::AWS-DeviceFarm-Project')
    self.project_arn = self.custom_resource.get_response_field_reference(
        'project.arn')
    # ARN layout: arn:aws:devicefarm:region:account:project:PROJECT_ID
    self.project_id = core.Fn.select(
        6, core.Fn.split(":", core.Token.as_string(self.project_arn)))
def get_resource(self):
    """
    Creates a custom resource to manage an ecs deployment configuration.

    (The previous docstring documented a ``scope`` parameter that does not
    exist in the signature; it has been removed.)

    :return: Custom resource to manage an ecs deployment configuration.
    """
    return AwsCustomResource(self.__stack,
                             self.__prefix + "CustomDeploymentConfigResource",
                             on_create=self.__on_create(),
                             on_update=self.__on_update(),
                             on_delete=self.__on_delete(),
                             role=self.__role())
def __init__(
        self,
        scope: core.Construct,
        id_: str,
        role_alias: str,
        role_arn: str,
        credential_duration_seconds: int,
        log_retention=None,
        timeout=None,
) -> None:
    """Manage an AWS IoT role alias via an SDK-call custom resource.

    :param role_alias: Name of the IoT role alias to create/update/delete.
    :param role_arn: IAM role the alias points at (the provisioning lambda
        is granted iam:PassRole on it).
    :param credential_duration_seconds: Lifetime of credentials vended
        through the alias.
    """
    super().__init__(scope, id_)
    on_create = self.get_on_create(
        role_alias=role_alias,
        role_arn=role_arn,
        credential_duration_seconds=credential_duration_seconds)
    on_update = self.get_on_update(
        role_alias=role_alias,
        role_arn=role_arn,
        credential_duration_seconds=credential_duration_seconds)
    on_delete = self.get_on_delete(role_alias=role_alias)
    account_id = Stack.of(self).account
    region = Stack.of(self).region
    # Permissions are scoped to the single role alias being managed.
    policy = AwsCustomResourcePolicy.from_sdk_calls(resources=[
        f'arn:aws:iot:{region}:{account_id}:rolealias/{role_alias}'
    ])
    lambda_role_singleton = CustomResourcesLambdaRole(scope)
    # The provisioning lambda must be able to pass the aliased role to IoT.
    lambda_role_singleton.add_to_policy(actions=["iam:PassRole"],
                                        resources=[role_arn])
    AwsCustomResource(scope=self,
                      # Was f'CustomResource' — an f-string with no
                      # placeholders (F541); a plain literal is identical.
                      id='CustomResource',
                      policy=policy,
                      log_retention=log_retention,
                      on_create=on_create,
                      on_update=on_update,
                      on_delete=on_delete,
                      resource_type='Custom::AWS-IoT-Role-Alias',
                      role=lambda_role_singleton.role,
                      timeout=timeout)
def __init__(
        self,
        scope: core.Construct,
        id_: str,
        bucket_name: str,
        object_key: str,
        object_content: Any,
        log_retention=None,
        timeout=Duration.seconds(amount=DEFAULT_S3_READ_TIMEOUT_SEC)
) -> None:
    """Store tenant metadata as an S3 object managed by a custom resource."""
    super().__init__(scope, id_)
    config_bucket = s3.Bucket.from_bucket_name(
        scope=scope, id="TenantConfigBucket", bucket_name=bucket_name)
    create_call = self.get_on_create(bucket_name=bucket_name,
                                     object_key=object_key,
                                     object_content=object_content)
    # Updating an S3 object is actually creating a new version of it.
    update_call = create_call
    delete_call = self.get_on_delete(bucket_name, object_key)
    # Only the single managed object is reachable via the SDK-call policy.
    object_policy = AwsCustomResourcePolicy.from_sdk_calls(
        resources=[f'{config_bucket.bucket_arn}/{object_key}'])
    provisioning_role = self.get_provisioning_lambda_role()
    AwsCustomResource(scope=scope,
                      id='AwsCustomResourceTenantMetadataS3Object',
                      policy=object_policy,
                      log_retention=log_retention,
                      on_create=create_call,
                      on_update=update_call,
                      on_delete=delete_call,
                      resource_type='Custom::AWS-S3-Object',
                      role=provisioning_role,
                      timeout=timeout)
class DeviceFarmDevicePool(core.Construct):
    """Device Farm device pool managed through SDK-call custom resources.

    Device Farm has no CloudFormation resource type, so create/update/
    delete are expressed as AwsSdkCall definitions pinned to 'us-west-2',
    the only region Device Farm currently serves.
    """

    # Actions required by a handler that manages device pools.
    DEVICE_FARM_CFN_HANDLER_ACTIONS = [
        "devicefarm:CreateDevicePool",
        "devicefarm:UpdateDevicePool",
        "devicefarm:DeleteDevicePool"
    ]

    device_pool_name = None
    device_pool_arn = None
    custom_resource = None

    def __init__(self, scope: core.Construct, id: str, project_arn: str, device_pool_name: str, manufacturer: str = 'Google', platform: str = 'ANDROID', os_version='10', max_devices=1, log_retention=None) -> None:
        """Create the pool; its ARN is exposed as ``self.device_pool_arn``."""
        super().__init__(scope, id)
        self.device_pool_name = device_pool_name
        # The pool ARN is unknown before creation, so the policy is broad.
        sdk_call_policy = AwsCustomResourcePolicy.from_sdk_calls(
            resources=AwsCustomResourcePolicy.ANY_RESOURCE)
        self.custom_resource = AwsCustomResource(
            scope=self,
            id=f'{id}-CustomResource',
            policy=sdk_call_policy,
            log_retention=log_retention,
            on_create=self.create_device_pool(project_arn, device_pool_name,
                                              platform, max_devices,
                                              os_version, manufacturer),
            on_update=self.update_device_pool(device_pool_name, platform,
                                              max_devices, os_version,
                                              manufacturer),
            on_delete=self.delete_device_pool(),
            resource_type='Custom::AWS-DeviceFarm-DevicePool')
        self.device_pool_arn = self.custom_resource.get_response_field_reference(
            'devicePool.arn')

    def create_device_pool(self, project_arn, device_pool_name, platform,
                           max_devices, os_version, manufacturer):
        """Build the createDevicePool SDK call."""
        return AwsSdkCall(
            action='createDevicePool',
            service='DeviceFarm',
            parameters={
                'name': device_pool_name,
                'projectArn': project_arn,
                'rules': self._build_rules(platform, os_version, manufacturer),
                'maxDevices': max_devices
            },
            # When other endpoints become available for DF, we won't have to do this.
            region='us-west-2',
            physical_resource_id=PhysicalResourceId.from_response(
                "devicePool.arn"))

    def update_device_pool(self, device_pool_name, platform, max_devices,
                           os_version, manufacturer):
        """Build the updateDevicePool SDK call.

        NOTE(review): when __init__ builds this call, self.device_pool_arn
        is still the class-level None — confirm the intended ARN actually
        reaches the update parameters.
        """
        return AwsSdkCall(
            action='updateDevicePool',
            service='DeviceFarm',
            parameters={
                'arn': self.device_pool_arn,
                'name': device_pool_name,
                'rules': self._build_rules(platform, os_version, manufacturer),
                'maxDevices': max_devices
            },
            # When other endpoints become available for DF, we won't have to do this.
            region='us-west-2',
            physical_resource_id=PhysicalResourceId.from_response(
                "devicePool.arn"))

    def delete_device_pool(self):
        """Build the deleteDevicePool SDK call.

        NOTE(review): see update_device_pool about self.device_pool_arn
        being None at build time.
        """
        return AwsSdkCall(
            action='deleteDevicePool',
            service='DeviceFarm',
            parameters={'arn': self.device_pool_arn},
            # When other endpoints become available for DF, we won't have to do this.
            region='us-west-2',
            physical_resource_id=PhysicalResourceId.from_response(
                "devicePool.arn"))

    def _build_rules(self, platform, os_version, manufacturer,
                     availability='HIGHLY_AVAILABLE'):
        """Return EQUALS selection rules; values are JSON-quoted strings."""
        criteria = [
            ('AVAILABILITY', availability),
            ('MANUFACTURER', manufacturer),
            ('PLATFORM', platform),
            ('OS_VERSION', os_version),
        ]
        return [{
            'attribute': attribute,
            'operator': 'EQUALS',
            'value': f"\"{value}\""
        } for attribute, value in criteria]
def __init__(
    self, scope: core.Construct, id: str, central_account, **kwargs
) -> None:
    """Target-account side of the QuickSight migration setup.

    Creates the role the central account assumes, stores the stack config
    under /infra/config in SSM, and provisions a QuickSight user plus a
    policy assignment through SDK-call custom resources.
    """
    super().__init__(scope, id, **kwargs)
    self.current_dir = os.path.dirname(__file__)
    # Change to your central account
    self.central_account_id = central_account

    # Inline permissions for the migration Lambda role.
    allow_access_statements = [
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                "quicksight:*",
            ],
            resources=["*"],
        ),
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                "ssm:GetParameter",
            ],
            resources=[
                f"arn:aws:ssm:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:parameter/infra/config"
            ],
        ),
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["secretsmanager:GetSecretValue"],
            resources=[
                f"arn:aws:secretsmanager:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:secret:*"
            ],
        ),
    ]
    self.quicksight_migration_target_assume_role = iam.Role(
        self,
        "quicksight-migration-target-assume-role",
        description="Role for the Quicksight dashboard migration Lambda function to assume",
        role_name="quicksight-migration-target-assume-role",
        max_session_duration=core.Duration.seconds(3600),
        assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
        inline_policies={
            "AllowAccess": iam.PolicyDocument(
                statements=allow_access_statements)
        },
    )
    # Let the central account assume this role.
    self.quicksight_migration_target_assume_role.assume_role_policy.add_statements(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["sts:AssumeRole"],
            principals=[iam.AccountPrincipal(self.central_account_id)],
        )
    )

    ssm.StringParameter(
        self,
        "InfraConfigParam",
        parameter_name="/infra/config",
        string_value=json.dumps(self.to_dict()),
    )

    self.quicksight_managed_resources_policy = iam.ManagedPolicy(
        self,
        "iam_policy",
        managed_policy_name="QuickSightMigrationPolicy",
        statements=[
            iam.PolicyStatement(
                sid="AWSResourceAccess",
                effect=iam.Effect.ALLOW,
                actions=[
                    "iam:List*",
                    "redshift:Describe*",
                    "rds:Describe*",
                    "athena:Get*",
                    "athena:List*",
                    "athena:BatchGetQueryExecution",
                    "athena:StartQueryExecution",
                    "athena:StopQueryExecution",
                    "s3:GetBucketLocation",
                    "s3:GetObject",
                    "s3:ListBucket",
                    "s3:ListBucketMultipartUploads",
                    "s3:ListMultipartUploadParts",
                    "s3:AbortMultipartUpload",
                    "s3:CreateBucket",
                    "s3:PutObject",
                    "s3:PutBucketPublicAccessBlock",
                ],
                resources=["*"],
            ),
        ],
    )

    # create a custom resource that will create a quicksight user using the IAM target role
    self.qs_user = AwsCustomResource(
        self,
        "AWSCustomResourceQSUser",
        role=self.quicksight_migration_target_assume_role,
        policy=AwsCustomResourcePolicy.from_sdk_calls(
            resources=AwsCustomResourcePolicy.ANY_RESOURCE
        ),
        on_create=self.create_qsuser(),
        on_delete=self.delete_qsuser(),
        resource_type="Custom::QuickSightMigrationUser",
    )
    self.qs_user.node.add_dependency(self.quicksight_managed_resources_policy)

    # create a custom resource that will create a quicksight policy assignment
    self.qs_policy_assignment = AwsCustomResource(
        self,
        "AWSCustomResourceQSPolicyAssignment",
        role=self.quicksight_migration_target_assume_role,
        policy=AwsCustomResourcePolicy.from_sdk_calls(
            resources=AwsCustomResourcePolicy.ANY_RESOURCE
        ),
        on_create=self.create_policy_assignment(),
        on_delete=self.delete_policy_assignment(),
        resource_type="Custom::QuickSightPolicyAssignment",
    )
    self.qs_policy_assignment.node.add_dependency(self.qs_user)
class DeviceFarmProject(core.Construct):
    """Device Farm project managed through SDK-call custom resources.

    Device Farm has no CloudFormation resource type, so project CRUD is
    expressed as AwsSdkCall definitions pinned to 'us-west-2', the only
    region Device Farm currently serves.
    """

    # Actions required by a handler that manages projects.
    DEVICE_FARM_CFN_HANDLER_ACTIONS = [
        "devicefarm:DeleteProject",
        "devicefarm:CreateProject",
        "devicefarm:UpdateProject"
    ]

    project_name = None
    project_arn = None
    project_id = None
    custom_resource = None

    def __init__(self, scope: core.Construct, id: str, project_name: str, log_retention=None) -> None:
        """Create the project; exposes project_arn and project_id."""
        super().__init__(scope, id)
        self.project_name = project_name
        # The project ARN is unknown before creation, so the policy is broad.
        sdk_call_policy = AwsCustomResourcePolicy.from_sdk_calls(
            resources=AwsCustomResourcePolicy.ANY_RESOURCE)
        self.custom_resource = AwsCustomResource(
            scope=self,
            id=f'{id}-CustomResource',
            policy=sdk_call_policy,
            log_retention=log_retention,
            on_create=self.create_project(project_name),
            on_update=self.update_project(project_name),
            on_delete=self.delete_project(project_name),
            resource_type='Custom::AWS-DeviceFarm-Project')
        self.project_arn = self.custom_resource.get_response_field_reference(
            'project.arn')
        # ARN layout: arn:aws:devicefarm:region:account:project:PROJECT_ID
        self.project_id = core.Fn.select(
            6, core.Fn.split(":", core.Token.as_string(self.project_arn)))

    def create_project(self, project_name):
        """Build the createProject SDK call."""
        return AwsSdkCall(
            action='createProject',
            service='DeviceFarm',
            parameters={'name': project_name},
            # When other endpoints become available for DF, we won't have to do this.
            region='us-west-2',
            physical_resource_id=PhysicalResourceId.from_response(
                "project.arn"))

    def update_project(self, project_name):
        """Build the updateProject SDK call.

        NOTE(review): when __init__ builds this call, self.project_arn is
        still the class-level None — confirm the intended ARN actually
        reaches the update parameters.
        """
        return AwsSdkCall(
            action='updateProject',
            service='DeviceFarm',
            parameters={
                'name': project_name,
                'arn': self.project_arn
            },
            # When other endpoints become available for DF, we won't have to do this.
            region='us-west-2',
            physical_resource_id=PhysicalResourceId.from_response(
                "project.arn"))

    def delete_project(self, project_name):
        """Build the deleteProject SDK call.

        NOTE(review): see update_project about self.project_arn being None
        at build time.
        """
        return AwsSdkCall(
            action='deleteProject',
            service='DeviceFarm',
            parameters={'arn': self.project_arn},
            # When other endpoints become available for DF, we won't have to do this.
            region='us-west-2',
            physical_resource_id=PhysicalResourceId.from_response(
                "project.arn"))

    def get_arn(self):
        """Return the project ARN response-field reference."""
        return self.project_arn

    def get_project_id(self):
        """Return the project id extracted from the ARN."""
        return self.project_id
def __init__(self, scope: core.Construct, id: str, params: CodeStarLambdaParameters) -> None:
    """Provision a CodeStar project (lambda toolchain) via a custom resource."""
    super().__init__(scope, id)
    policy = aws_iam.PolicyStatement(
        actions=[
            "iam:PassRole",
            "codestar:CreateProject",
            "codestar:UpdateProject",
            "codestar:DeleteProject",
            "s3:GetObject"
        ]
    )
    # NOTE(review): grants the actions on all resources — consider scoping.
    policy.add_all_resources()

    # Deployment parameters.
    project_name = params.deployment_params.project_name
    bucket_name = params.deployment_params.bucket_name
    # S3 file keys, which coincide with file names in files/ folder.
    code_bucket_key = 'source.zip'
    toolchain_bucket_key = 'toolchain.yml'
    # VPC parameters for lambda function.
    subnet_ids = params.vpc_params.subnet_ids
    security_group_ids = params.vpc_params.security_group_ids
    # Parameters for function invocation.
    event_type = params.lambda_type_params.event_type

    stack_parameters = {
        "ProjectId": project_name,
        "MySubnetIds": subnet_ids,
        "MySecurityGroupIds": security_group_ids,
        "EventType": event_type
    }
    if event_type == 'Schedule':
        stack_parameters["ScheduleExpression"] = (
            params.lambda_type_params.schedule_expression)

    source_code = [
        {
            'destination': {
                'codeCommit': {
                    'name': project_name
                },
            },
            'source': {
                's3': {
                    'bucketKey': code_bucket_key,
                    'bucketName': bucket_name
                }
            }
        },
    ]
    toolchain = {
        'source': {
            's3': {
                'bucketKey': toolchain_bucket_key,
                'bucketName': bucket_name
            }
        },
        # NOTE(review): hard-coded account id in this role ARN — confirm
        # it is intended for this deployment.
        'roleArn': 'arn:aws:iam::770536902058:role/service-role/aws-codestar-service-role',
        'stackParameters': stack_parameters
    }
    AwsCustomResource(
        self, "CreateProject",
        on_create={
            "service": "CodeStar",
            "action": "createProject",
            "parameters": {
                'id': project_name,
                'name': project_name,
                'sourceCode': source_code,
                'toolchain': toolchain
            },
            "physicalResourceId": '123'
        },
        on_update={
            "service": "CodeStar",
            "action": "updateProject",
            "parameters": {
                'id': project_name,
                "description": "dummy description",
            },
            "physicalResourceId": '123'
        },
        on_delete={
            "service": "CodeStar",
            "action": "deleteProject",
            "parameters": {
                'id': project_name,
                "deleteStack": True,
            },
            "physicalResourceId": '123'
        },
        policy_statements=[policy]
    )
def create_package(self):
    """Create the MediaPackage channel and its HLS origin endpoint.

    :return: The endpoint URL (also emitted as a CfnOutput).
    """
    # Step 1: role + policy. The policy may be over-permissive here, but we
    # don't know the resource ARN yet, so we use the ANY_RESOURCE trick.
    # (Previously the role name was an f-string with no placeholders — F541.)
    lambda_role_mediapackage = self.get_provisioning_lambda_role(
        role_name='stack-the-media-live')
    custom_policy = AwsCustomResourcePolicy.from_sdk_calls(
        resources=AwsCustomResourcePolicy.ANY_RESOURCE)

    # Step 2: create/update/delete SDK calls for both the
    # MediaPackageChannel and the MediaPackageOriginEndpoint.
    on_create_mediapackage = self.on_create_mediapackage()
    on_update_mediapackage = self.on_update_mediapackage()
    on_delete_mediapackage = self.on_delete_mediapackage()
    on_create_mediapackage_endpoint = self.on_create_mediapackage_endpoint()
    on_update_mediapackage_endpoint = self.on_update_mediapackage_endpoint()
    on_delete_mediapackage_endpoint = self.on_delete_mediapackage_endpoint()

    # Step 3: the MediaPackage channel itself.
    channel = AwsCustomResource(
        scope=self.scope,
        id=f'{self.id_channel}-MediaPackage-AWSCustomResource',
        policy=custom_policy,
        log_retention=None,  # We don't need logs at this moment.
        on_create=on_create_mediapackage,
        on_update=on_update_mediapackage,
        on_delete=on_delete_mediapackage,
        resource_type='Custom::MediaPackageChannel',
        role=lambda_role_mediapackage,
        # Timeout of the Lambda implementing this custom resource.
        # Default: Duration.minutes(2)
        timeout=None)

    # Step 4: the origin endpoint. HLS is the most common endpoint type;
    # other types are listed at:
    # https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/MediaPackage.html#createOriginEndpoint-property
    hls_endpoint = AwsCustomResource(
        scope=self.scope,
        id=f'{self.id_channel}-MediaPackageEndpoint-AWSCustomResource',
        policy=custom_policy,
        log_retention=None,  # We don't need logs at this moment.
        on_create=on_create_mediapackage_endpoint,
        on_update=on_update_mediapackage_endpoint,
        on_delete=on_delete_mediapackage_endpoint,
        resource_type='Custom::MediaPackageHlsEndpoint',
        role=lambda_role_mediapackage,
        # Timeout of the Lambda implementing this custom resource.
        # Default: Duration.minutes(2)
        timeout=None)

    # The endpoint must not be created before the channel exists.
    mediadep = core.ConcreteDependable()
    mediadep.add(channel)
    hls_endpoint.node.add_dependency(mediadep)

    core.CfnOutput(scope=self,
                   id="media-package-url-strem",
                   value=hls_endpoint.get_response_field("Url"))
    return hls_endpoint.get_response_field("Url")
def __init__(self, scope: core.Construct, id: str, es_domain: CfnDomain, kda_role: iam.Role,
             source_bucket: s3.Bucket, dest_bucket: s3.Bucket, **kwargs):
    """Kinesis Data Analytics (Flink) application with CloudWatch logging,
    the IAM grants the app needs, and a custom resource that starts/stops it.

    Review fixes applied:
    - ``self.app`` was referenced but never assigned (the attribute is the
      name-mangled private ``self.__app``), raising AttributeError at
      synth time; both references now use ``self.__app``.
    - The S3 ARN ``arn:aws:s3::::*`` contained an extra ':'; the valid
      all-buckets ARN is ``arn:aws:s3:::*``.
    """
    super().__init__(scope, id, **kwargs)
    stack = Stack.of(self)

    kda_role.add_to_policy(PolicyStatement(actions=['cloudwatch:PutMetricData'],
                                           resources=['*']))
    artifacts_bucket_arn = 'arn:aws:s3:::' + _config.ARA_BUCKET.replace("s3://", "")
    kda_role.add_to_policy(PolicyStatement(actions=['s3:GetObject', 's3:GetObjectVersion'],
                                           resources=[artifacts_bucket_arn,
                                                      artifacts_bucket_arn + '/binaries/*']))

    log_group = logs.LogGroup(scope=self,
                              id='KdaLogGroup',
                              retention=logs.RetentionDays.ONE_WEEK,
                              removal_policy=RemovalPolicy.DESTROY)
    log_stream = logs.LogStream(scope=self,
                                id='KdaLogStream',
                                log_group=log_group,
                                removal_policy=RemovalPolicy.DESTROY)
    log_stream_arn = stack.format_arn(
        service='logs',
        resource='log-group',
        resource_name=log_group.log_group_name + ':log-stream:' + log_stream.log_stream_name,
        sep=':')

    # TODO: restrict
    kda_role.add_to_policy(PolicyStatement(
        actions=['logs:*'],
        resources=[stack.format_arn(service='logs', resource='*')]))
    kda_role.add_to_policy(PolicyStatement(
        actions=['logs:DescribeLogStreams', 'logs:DescribeLogGroups'],
        resources=[log_group.log_group_arn,
                   stack.format_arn(service='logs', resource='log-group',
                                    resource_name='*')]))
    kda_role.add_to_policy(PolicyStatement(actions=['logs:PutLogEvents'],
                                           resources=[log_stream_arn]))
    kda_role.add_to_policy(PolicyStatement(
        actions=['es:ESHttp*'],
        resources=[stack.format_arn(service='es', resource='domain',
                                    resource_name=es_domain.domain_name + '/*')]))
    # TODO: restrict. Fixed malformed ARN (was 'arn:aws:s3::::*').
    kda_role.add_to_policy(PolicyStatement(actions=['s3:*'],
                                           resources=['arn:aws:s3:::*']))

    # Define KDA application
    application_configuration = {
        'environmentProperties': {
            'propertyGroups': [
                {
                    'propertyGroupId': 'ConsumerConfigProperties',
                    'propertyMap': {
                        'CustomerStream': scope.customer_stream.stream_name,
                        'AddressStream': scope.address_stream.stream_name,
                        'SaleStream': scope.sale_stream.stream_name,
                        'PromoDataPath': source_bucket.s3_url_for_object('promo'),
                        'ItemDataPath': source_bucket.s3_url_for_object('item'),
                        'aws.region': scope.region
                    }
                },
                {
                    'propertyGroupId': 'ProducerConfigProperties',
                    'propertyMap': {
                        'ElasticsearchHost': 'https://' + es_domain.attr_domain_endpoint + ':443',
                        'Region': scope.region,
                        'DenormalizedSalesS3Path': dest_bucket.s3_url_for_object() + '/',
                        'IndexName': 'ara-write'
                    }
                }
            ]
        },
        'applicationCodeConfiguration': {
            'codeContent': {
                's3ContentLocation': {
                    'bucketArn': artifacts_bucket_arn,
                    'fileKey': 'binaries/stream-processing-1.1.jar'
                }
            },
            'codeContentType': 'ZIPFILE'
        },
        'flinkApplicationConfiguration': {
            'parallelismConfiguration': {
                'configurationType': 'DEFAULT'
            },
            'checkpointConfiguration': {
                'configurationType': 'DEFAULT'
            },
            'monitoringConfiguration': {
                'logLevel': 'DEBUG',
                'metricsLevel': 'TASK',
                'configurationType': 'CUSTOM'
            }
        },
        'applicationSnapshotConfiguration': {
            'snapshotsEnabled': False
        }
    }
    self.__app = CfnApplicationV2(scope=self,
                                  id='KDA application',
                                  runtime_environment='FLINK-1_11',
                                  application_name='KDA-application',
                                  service_execution_role=kda_role.role_arn,
                                  application_configuration=application_configuration)

    logging = CfnApplicationCloudWatchLoggingOptionV2(
        scope=self,
        id='KDA application logging',
        application_name=self.__app.ref,
        cloud_watch_logging_option={'logStreamArn': log_stream_arn})
    logging.apply_removal_policy(policy=RemovalPolicy.RETAIN,
                                 apply_to_update_replace_policy=True,
                                 default=RemovalPolicy.RETAIN)

    # Use a custom resource to start the application
    create_params = {
        'ApplicationName': self.__app.ref,
        'RunConfiguration': {
            'ApplicationRestoreConfiguration': {
                'ApplicationRestoreType': 'SKIP_RESTORE_FROM_SNAPSHOT'
            },
            'FlinkRunConfiguration': {
                'AllowNonRestoredState': True
            }
        }
    }
    # See https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/ for service name, actions and parameters
    create_action = AwsSdkCall(service='KinesisAnalyticsV2',
                               action='startApplication',
                               parameters=create_params,
                               physical_resource_id=PhysicalResourceId.of(
                                   self.__app.ref + '-start'))
    delete_action = AwsSdkCall(service='KinesisAnalyticsV2',
                               action='stopApplication',
                               parameters={'ApplicationName': self.__app.ref,
                                           'Force': True})
    custom_resource = AwsCustomResource(
        scope=self,
        id='KdaStartAndStop',
        on_create=create_action,
        on_delete=delete_action,
        policy=AwsCustomResourcePolicy.from_statements([PolicyStatement(
            actions=['kinesisanalytics:StartApplication',
                     'kinesisanalytics:StopApplication',
                     'kinesisanalytics:DescribeApplication',
                     'kinesisanalytics:UpdateApplication'],
            resources=[stack.format_arn(service='kinesisanalytics',
                                        resource='application',
                                        resource_name=self.__app.application_name)])]))
    # Don't try to start the application before it exists.
    custom_resource.node.add_dependency(self.__app)