def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Provision a Timestream database plus a sample metrics table.

    The memory-store and magnetic-store retention periods are exposed as
    CloudFormation parameters so each deployment can tune them.
    """
    super().__init__(scope, construct_id, **kwargs)

    # Stack parameters controlling per-table retention windows.
    memory_hours = core.CfnParameter(
        self,
        "memoryRetentionParam",
        type="Number",
        min_value=1,
        max_value=8766,
        default=6,
        description=("The duration (in hours) for which data must be retained "
                     "in the memory store per table."),
    )
    magnetic_days = core.CfnParameter(
        self,
        "magneticRetentionParam",
        type="Number",
        min_value=1,
        max_value=73000,
        default=15,
        description=("The duration (in days) for which data must be retained "
                     "in the magnetic store per table."),
    )

    db = timestream.CfnDatabase(
        self, id="TimestreamDatabase", database_name="TimestreamDB")

    metrics_table = timestream.CfnTable(
        self,
        "SampleMetricsTable",
        database_name=db.database_name,
        retention_properties={
            "MemoryStoreRetentionPeriodInHours": memory_hours.value_as_number,
            "MagneticStoreRetentionPeriodInDays": magnetic_days.value_as_number,
        },
        table_name="SampleMetricsTable",
    )
    # CloudFormation must create the database before the table.
    metrics_table.add_depends_on(db)

    self._database = db
    self._table = metrics_table
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    """Ship Active Directory events from CloudWatch Logs to S3 via Firehose."""
    super().__init__(scope, construct_id, **kwargs)

    # Values exported by the core SIEM stack.
    # NOTE(review): 'sime-log-bucket-name' looks misspelled, but it must
    # match the exporting stack's output name exactly — confirm upstream
    # before changing.
    log_bucket_name = cdk.Fn.import_value('sime-log-bucket-name')
    role_name_cwl_to_kdf = cdk.Fn.import_value('siem-cwl-to-kdf-role-name')
    role_name_kdf_to_s3 = cdk.Fn.import_value('siem-kdf-to-s3-role-name')

    # Tunable delivery-stream settings exposed as template parameters.
    kdf_ad_name = cdk.CfnParameter(
        self, 'KdfAdName',
        description='Kinesis Data Firehose Name to deliver AD event',
        default='siem-ad-event-to-s3')
    kdf_buffer_size = cdk.CfnParameter(
        self, 'KdfBufferSize', type='Number',
        description='Enter a buffer size between 1 - 128 (MiB)',
        default=1, min_value=1, max_value=128)
    kdf_buffer_interval = cdk.CfnParameter(
        self, 'KdfBufferInterval', type='Number',
        description='Enter a buffer interval between 60 - 900 (seconds.)',
        default=60, min_value=60, max_value=900)
    cwl_ad_name = cdk.CfnParameter(
        self, 'CwlAdName',
        description='CloudWatch Logs group name',
        default='/aws/directoryservice/d-XXXXXXXXXXXXXXXXX')

    # Firehose delivery stream writing AD events into the SIEM log bucket.
    delivery_stream = aws_kinesisfirehose.CfnDeliveryStream(
        self,
        "KDFForAdEventLog",
        delivery_stream_name=kdf_ad_name.value_as_string,
        s3_destination_configuration=CDS.S3DestinationConfigurationProperty(
            bucket_arn=f'arn:aws:s3:::{log_bucket_name}',
            prefix=f'AWSLogs/{cdk.Aws.ACCOUNT_ID}/DirectoryService/MicrosoftAD/',
            buffering_hints=CDS.BufferingHintsProperty(
                interval_in_seconds=kdf_buffer_interval.value_as_number,
                size_in_m_bs=kdf_buffer_size.value_as_number),
            compression_format='UNCOMPRESSED',
            role_arn=(f'arn:aws:iam::{cdk.Aws.ACCOUNT_ID}:role/'
                      f'service-role/{role_name_kdf_to_s3}')))

    # Subscribe the AD log group to the delivery stream (empty pattern = all events).
    aws_logs.CfnSubscriptionFilter(
        self,
        'KinesisSubscription',
        destination_arn=delivery_stream.attr_arn,
        filter_pattern='',
        log_group_name=cwl_ad_name.value_as_string,
        role_arn=(f'arn:aws:iam::{cdk.Aws.ACCOUNT_ID}:role/'
                  f'{role_name_cwl_to_kdf}'))
def __init__(self, app: core.App, id: str) -> None:
    """Create four DynamoDB tables named by stack parameters, exporting each name.

    The original body repeated the same parameter/table/output trio four
    times; the tables differ only in their logical ids, so the construction
    is data-driven here. All CloudFormation logical ids are unchanged.
    """
    super().__init__(app, id)

    # (CfnParameter logical id, Table construct id, CfnOutput logical id)
    table_specs = [
        ("FirstDynamodbName", "onesix", "_FirstdynamodbName"),
        ("SecondDynamodbName", "twofour", "_SeconddynamodbName"),
        ("ThirdDynamodbName", "threefour", "_ThirddynamodbName"),
        ("FourthDynamodbName", "fourfour", "_FourthdynamodbName"),
    ]
    for param_id, table_id, output_id in table_specs:
        name_param = core.CfnParameter(self, param_id, type="String")
        # DESTROY removal policy: these are disposable demo tables.
        table = aws_dynamodb.Table(
            self,
            table_id,
            removal_policy=core.RemovalPolicy.DESTROY,
            table_name=name_param.value_as_string,
            partition_key=aws_dynamodb.Attribute(
                name="id", type=aws_dynamodb.AttributeType.STRING),
        )
        core.CfnOutput(self, output_id, value=table.table_name)
def __init__(self, scope: cdk.Construct, construct_id: str,
             default_firehose_name='siem-XXXXXXXXXXX-to-s3',
             firehose_compression_format='UNCOMPRESSED',
             **kwargs) -> None:
    """Generic Firehose-to-S3 delivery stream for SIEM log ingestion.

    The stream name, buffering, and destination prefix are template
    parameters; compression and the default name are construction-time
    arguments.
    """
    super().__init__(scope, construct_id, **kwargs)

    # Exports from the core SIEM stack.
    # NOTE(review): 'sime-log-bucket-name' appears misspelled but must match
    # the exporting stack's output name — verify before changing.
    log_bucket_name = cdk.Fn.import_value('sime-log-bucket-name')
    role_name_kdf_to_s3 = cdk.Fn.import_value('siem-kdf-to-s3-role-name')

    kdf_name = cdk.CfnParameter(
        self, 'FirehoseName',
        description=('New Kinesis Data Firehose Name to deliver log. '
                     'modify XXXXXXXXX'),
        default=default_firehose_name)
    kdf_buffer_size = cdk.CfnParameter(
        self, 'FirehoseBufferSize', type='Number',
        description='Enter a buffer size between 1 - 128 (MiB)',
        default=1, min_value=1, max_value=128)
    kdf_buffer_interval = cdk.CfnParameter(
        self, 'FirehoseBufferInterval', type='Number',
        description='Enter a buffer interval between 60 - 900 (seconds.)',
        default=60, min_value=60, max_value=900)
    # NOTE(review): the default prefix placeholder says 'YourAccuntId';
    # kept byte-identical as it is a runtime default value.
    s3_destination_prefix = cdk.CfnParameter(
        self, 'S3DestPrefix',
        description='S3 destination prefix',
        default='AWSLogs/YourAccuntId/LogType/Region/')

    self.kdf_to_s3 = aws_kinesisfirehose.CfnDeliveryStream(
        self,
        "Kdf",
        delivery_stream_name=kdf_name.value_as_string,
        s3_destination_configuration=CDS.S3DestinationConfigurationProperty(
            bucket_arn=f'arn:aws:s3:::{log_bucket_name}',
            prefix=s3_destination_prefix.value_as_string,
            buffering_hints=CDS.BufferingHintsProperty(
                interval_in_seconds=kdf_buffer_interval.value_as_number,
                size_in_m_bs=kdf_buffer_size.value_as_number),
            compression_format=firehose_compression_format,
            role_arn=(f'arn:aws:iam::{cdk.Aws.ACCOUNT_ID}:role/'
                      f'service-role/{role_name_kdf_to_s3}')))
def create_model_artifact_location_parameter(scope):
    """Return a String parameter for the model artifact path in the assets bucket."""
    param = core.CfnParameter(
        scope,
        "MODEL_ARTIFACT_LOCATION",
        type="String",
        description="Path to model artifact inside assets bucket.",
    )
    return param
def create_stack_name_parameter(scope):
    """Return a non-empty String parameter naming the deployed CF stack."""
    param = core.CfnParameter(
        scope,
        "STACK_NAME",
        type="String",
        description="The name to assign to the deployed CF stack.",
        min_length=1,
    )
    return param
def create_algorithm_image_uri_parameter(scope):
    """Return a String parameter for the algorithm container image URI.

    Fixes the description typo "build-in" -> "built-in".
    """
    return core.CfnParameter(
        scope,
        "IMAGE_URI",
        type="String",
        description="The algorithm image uri (built-in or custom)",
    )
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Alarm on VPC configuration changes, notifying an email via SNS.

    Improvements over the original: removes the unused ``email`` and
    ``cwAlarm`` local bindings and renames ``snsEmail`` to PEP 8 snake_case.
    Behavior and all construct ids are unchanged.
    """
    super().__init__(scope, construct_id, **kwargs)

    # Topic that fans the alarm out to the subscribed email address.
    sns_topic = sns.Topic(self, 'Topic')

    sns_email = core.CfnParameter(
        self, 'SNSEmail',
        default='PAnong@automation_rocks.com',
        description='Email Endpoint for SNS Notifications',
        type='String')

    # Subscription and alarm register themselves on the stack; no need to
    # keep references to them.
    sns_topic.add_subscription(
        subscriptions.EmailSubscription(sns_email.value_as_string))

    cw.CfnAlarm(
        self, 'VPCAlarm',
        actions_enabled=True,
        alarm_actions=[sns_topic.topic_arn],
        alarm_description=(
            "A CloudWatch Alarm that triggers when changes are made to the VPC."),
        comparison_operator="GreaterThanOrEqualToThreshold",
        evaluation_periods=1,
        treat_missing_data="notBreaching",
        threshold=1,
        metric_name="VpcEventCount",
        namespace="CloudTrailMetrics",
        period=300,
        statistic="Sum",
    )
def create_training_data_parameter(scope):
    """Return a String parameter locating the training data in the assets bucket."""
    param = core.CfnParameter(
        scope,
        "TRAINING_DATA",
        type="String",
        description="Location of the training data in Assets S3 Bucket.",
    )
    return param
def create_image_tag_parameter(scope):
    """Return a non-empty String parameter for the custom algorithm's image tag."""
    param = core.CfnParameter(
        scope,
        "IMAGE_TAG",
        type="String",
        description="Docker image tag for the custom algorithm",
        min_length=1,
    )
    return param
def __init__(self, scope: Cdk.Construct, id: str, **kwargs) -> None:
    """Deploy the Lambda proxy used to invoke cross-region CloudFormation Macros.

    Improvements over the original: the twice-rebound, unused ``output``
    local is removed and ``lookupRegionParameter`` is renamed to PEP 8
    snake_case. Construct ids, permissions, and outputs are unchanged.
    """
    super().__init__(
        scope,
        id,
        description="Lambda function for invoking cross-region CloudFormation Macros",
        **kwargs)

    lookup_region_parameter = Cdk.CfnParameter(
        self,
        "LookupRegion",
        description="Region that Macro Proxy will look up SSM Parameters in",
        type="String")

    # Inline the handler source so the stack has no asset dependencies.
    path = os.path.join(os.path.dirname(__file__), "src/app.py")
    with open(path) as f:
        code = Lambda.Code.from_inline(f.read())

    function = Lambda.Function(
        self,
        "Function",
        code=code,
        environment={
            "LOOKUP_AWS_REGION": lookup_region_parameter.value_as_string
        },
        function_name="LittleOrangeCloudFormationMacroProxy",
        handler="index.handler",
        runtime=Lambda.Runtime.PYTHON_3_7,
        timeout=Cdk.Duration.seconds(30))

    function.add_permission(
        "CloudFormationPermission",
        principal=Iam.ServicePrincipal("cloudformation.amazonaws.com"),
        action="lambda:InvokeFunction")

    # Allowing Principal * is a security issue
    # https://github.com/tomwwright/littleorange/issues/29
    function.add_permission(
        "AllAccountsPermission",
        principal=Iam.AccountPrincipal("*"),
        action="lambda:InvokeFunction")

    # NOTE(review): resources=["*"] on lambda:InvokeFunction is broader than
    # needed — consider scoping to the target function ARNs.
    function.add_to_role_policy(
        Iam.PolicyStatement(actions=["lambda:InvokeFunction"],
                            effect=Iam.Effect.ALLOW,
                            resources=["*"]))
    function.add_to_role_policy(
        Iam.PolicyStatement(
            actions=["ssm:GetParameter"],
            effect=Iam.Effect.ALLOW,
            resources=[
                "arn:aws:ssm:*:*:parameter/LittleOrange/CloudFormation/*"
            ]))

    Cdk.CfnOutput(self, "LambdaArn", value=function.function_arn)
    Cdk.CfnOutput(self, "LambdaName", value=function.function_name)
def create_instance_volume_size_parameter(scope):
    """Return a Number parameter for the monitoring job instance volume size (GB).

    Fixes the description typo "moniroing" -> "monitoring".
    """
    return core.CfnParameter(
        scope,
        "INSTANCE_VOLUME_SIZE",
        type="Number",
        description="Instance volume size used in model monitoring jobs. E.g., 20",
    )
def create_batch_job_output_location_parameter(scope):
    """Return a String parameter for the batch job's S3 output location."""
    param = core.CfnParameter(
        scope,
        "BATCH_OUTPUT_LOCATION",
        type="String",
        description=(
            "S3 path (including bucket name) to store the results of the batch job."
        ),
    )
    return param
def create_template_file_name_parameter(scope):
    """Return a String parameter for a ``.yaml`` CloudFormation template file name.

    Uses a raw string for ``allowed_pattern`` — ``"\\."`` in a non-raw
    string is an invalid escape sequence (SyntaxWarning on modern Python);
    the pattern's value is unchanged.
    """
    return core.CfnParameter(
        scope,
        "TEMPLATE_FILE_NAME",
        type="String",
        allowed_pattern=r"^.*\.yaml$",
        description="CloudFormation template's file name",
    )
def create_max_runtime_seconds_parameter(scope):
    """Return a Number parameter bounding the job's runtime in seconds.

    Fixes the description typo "secodns" -> "seconds".
    """
    return core.CfnParameter(
        scope,
        "MAX_RUNTIME_SECONDS",
        type="Number",
        description="Max runtime in seconds the job is allowed to run. E.g., 3600",
    )
def create_assets_bucket_name_parameter(scope):
    """Return a String parameter naming the bucket holding model and training data."""
    param = core.CfnParameter(
        scope,
        "ASSETS_BUCKET",
        type="String",
        description="Bucket name where the model and training data are stored.",
        min_length=3,
    )
    return param
def create_stage_params_file_name_parameter(scope, id, stage_type):
    """Return a String parameter for a stage's ``.json`` parameters file name.

    Uses a raw string for ``allowed_pattern`` — ``"\\."`` in a non-raw
    string is an invalid escape sequence (SyntaxWarning on modern Python);
    the pattern's value is unchanged.
    """
    return core.CfnParameter(
        scope,
        id,
        type="String",
        allowed_pattern=r"^.*\.json$",
        description=f"parameters json file's name for the {stage_type} stage",
    )
def create_batch_inference_data_parameter(scope):
    """Return a String parameter for the batch inference data's S3 path.

    Fixes the description typo "bukcet" -> "bucket".
    """
    return core.CfnParameter(
        scope,
        "BATCH_INFERENCE_DATA",
        type="String",
        description=(
            "S3 bucket path (including bucket name) to batch inference data file."
        ),
    )
def create_data_capture_location_parameter(scope):
    """Return a String parameter for where captured endpoint data is stored."""
    param = core.CfnParameter(
        scope,
        "DATA_CAPTURE_LOCATION",
        type="String",
        description=(
            "S3 path (including bucket name) to store captured data from the "
            "Sagemaker endpoint."
        ),
        min_length=3,
    )
    return param
def create_monitoring_output_location_parameter(scope):
    """Return a String parameter for the Monitoring Schedule's S3 output path."""
    param = core.CfnParameter(
        scope,
        "MONITORING_OUTPUT_LOCATION",
        type="String",
        description=(
            "S3 path (including bucket name) to store the output of the "
            "Monitoring Schedule."
        ),
        min_length=3,
    )
    return param
def create_blueprint_bucket_name_parameter(scope):
    """Return a String parameter naming the ML pipeline blueprints bucket."""
    param = core.CfnParameter(
        scope,
        "BLUEPRINT_BUCKET",
        type="String",
        description="Bucket name for blueprints of different types of ML Pipelines.",
        min_length=3,
    )
    return param
def create_org_id_parameter(scope, id, account_type):
    """Return a String parameter for an AWS organizational unit id (``ou-…``)."""
    param = core.CfnParameter(
        scope,
        id,
        type="String",
        description=(
            f"AWS {account_type} organizational unit id where the CF template "
            "will be deployed"
        ),
        allowed_pattern="^ou-[0-9a-z]{4,32}-[a-z0-9]{8,32}$",
    )
    return param
def create_account_id_parameter(scope, id, account_type):
    """Return a String parameter for a 12-digit AWS account number.

    Uses a raw string for ``allowed_pattern`` — ``"\\d"`` in a non-raw
    string is an invalid escape sequence (SyntaxWarning on modern Python);
    the pattern's value is unchanged.
    """
    return core.CfnParameter(
        scope,
        id,
        type="String",
        description=(
            f"AWS {account_type} account number where the CF template will be deployed"
        ),
        allowed_pattern=r"^\d{12}$",
    )
def create_schedule_expression_parameter(scope):
    """Return a String parameter for the monitoring schedule's cron expression."""
    # The '(' after "cron" is not escaped in the pattern; regex-wise it opens
    # the repeated group, and the literal '(' in a value like
    # "cron(0 * ? * * *)" is absorbed by the first '\S+'. The raw-string
    # form below is byte-identical to the original pattern value.
    param = core.CfnParameter(
        scope,
        "SCHEDULE_EXPRESSION",
        type="String",
        description=(
            "cron expression to run the monitoring schedule. E.g., "
            "cron(0 * ? * * *), cron(0 0 ? * * *), etc."
        ),
        allowed_pattern=r"^cron(\S+\s){5}\S+$",
    )
    return param
def create_baseline_job_output_location_parameter(scope):
    """Return a String parameter for the Data Baseline Job's S3 output path."""
    param = core.CfnParameter(
        scope,
        "BASELINE_JOB_OUTPUT_LOCATION",
        type="String",
        description=(
            "S3 path (including bucket name) to store the Data Baseline Job's output."
        ),
        min_length=3,
    )
    return param
def create_template_zip_name_parameter(scope):
    """Return a String parameter for the ``.zip`` bundle of template + params files.

    Uses a raw string for ``allowed_pattern`` — ``"\\."`` in a non-raw
    string is an invalid escape sequence (SyntaxWarning on modern Python);
    the pattern's value is unchanged.
    """
    return core.CfnParameter(
        scope,
        "TEMPLATE_ZIP_NAME",
        type="String",
        allowed_pattern=r"^.*\.zip$",
        description=(
            "The zip file's name containing the CloudFormation template and its "
            "parameters files"
        ),
    )
def create_monitoring_schedule_name_parameter(scope):
    """Return a String parameter (3-63 chars) naming the monitoring schedule job."""
    param = core.CfnParameter(
        scope,
        "MONITORING_SCHEDULE_NAME",
        type="String",
        description="Unique name of the monitoring schedule job",
        min_length=3,
        max_length=63,
    )
    return param
def create_batch_input_bucket_name_parameter(scope):
    """Return a String parameter naming the batch transform's input bucket.

    Fixes the description typo "bact" -> "batch".
    """
    return core.CfnParameter(
        scope,
        "BATCH_INPUT_BUCKET",
        type="String",
        description=(
            "Bucket name where the data input of the batch transform is stored."
        ),
        min_length=3,
    )
def create_baseline_job_name_parameter(scope):
    """Return a String parameter (3-63 chars) naming the data baseline job."""
    param = core.CfnParameter(
        scope,
        "BASELINE_JOB_NAME",
        type="String",
        description="Unique name of the data baseline job",
        min_length=3,
        max_length=63,
    )
    return param
def create_baseline_output_bucket_name_parameter(scope):
    """Return a String parameter naming the baseline job's output bucket."""
    param = core.CfnParameter(
        scope,
        "BASELINE_OUTPUT_BUCKET",
        type="String",
        description="Bucket name where the output of the baseline job will be stored.",
        min_length=3,
    )
    return param