def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Create a regional REST API and publish its URL and id to SSM.

    :param scope: CDK construct scope.
    :param id: Construct id.
    """
    super().__init__(scope, id, **kwargs)
    prj_name = self.node.try_get_context('project_name')
    env_name = self.node.try_get_context('env')
    region = core.Aws.REGION  # removed unused ACCOUNT_ID local

    api_gateway = apigw.RestApi(
        self, 'rest-api',
        endpoint_types=[apigw.EndpointType.REGIONAL],
        rest_api_name=f'{prj_name}-service')
    # At least one method is required for the RestApi to deploy successfully.
    api_gateway.root.add_method(http_method='ANY')

    # Export the invoke URL and the API id for other stacks to consume.
    ssm.StringParameter(
        self, 'api-gw',
        parameter_name=f'/{env_name}/api-gw-url',
        string_value=
        f'https://{api_gateway.rest_api_id}.execute-api.{region}.amazonaws.com/')
    ssm.StringParameter(self, 'api-gw-id',
                        parameter_name=f'/{env_name}/api-gw-id',
                        string_value=api_gateway.rest_api_id)
def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc,
             lambdasg: ec2.SecurityGroup, bastionsg: ec2.SecurityGroup,
             kmskey, **kwargs) -> None:
    """Provision an encrypted Aurora MySQL cluster with generated credentials.

    :param vpc: VPC whose isolated subnets host the cluster.
    :param lambdasg: Security group of the Lambda functions needing DB access.
    :param bastionsg: Security group of the bastion host needing DB access.
    :param kmskey: KMS key used for storage encryption.
    """
    super().__init__(scope, id, **kwargs)
    env_name = self.node.try_get_context('env')  # removed unused prj_name

    # Secrets Manager generates the password; the username is fixed in the template.
    creds_json_template = {'username': '******'}
    db_creds = sm.Secret(
        self, id="db-secret",
        secret_name=f'{env_name}-rds-secret',
        generate_secret_string=sm.SecretStringGenerator(
            include_space=False,  # no space in secret
            password_length=12,
            generate_string_key='rds-password',  # key in json dictionary for the password
            exclude_punctuation=True,
            secret_string_template=json.dumps(creds_json_template)))

    db_name = 'pryancdkdb'  # was an f-string with no placeholders
    db_mysql = rds.DatabaseCluster(
        self, id=f'{env_name}-mysql',
        default_database_name=db_name,
        engine=rds.DatabaseClusterEngine.aurora_mysql(
            version=rds.AuroraMysqlEngineVersion.VER_5_7_12),
        master_user=rds.Login(
            username='******',
            password=db_creds.secret_value_from_json('rds-password')),
        instance_props=rds.InstanceProps(
            vpc=vpc,
            # will pick one of the isolated Subnets from the vpc
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED),
            instance_type=ec2.InstanceType(instance_type_identifier='t3.small')),
        instances=1,
        storage_encrypted=True,
        storage_encryption_key=kmskey,
        removal_policy=core.RemovalPolicy.DESTROY)

    # Ingress: only the Lambda functions and the bastion may reach the DB port.
    db_mysql.connections.allow_default_port_from(
        lambdasg, 'Access from Lambda Functions')
    db_mysql.connections.allow_default_port_from(
        bastionsg, "Access from bastion host")

    # Publish connection details for downstream stacks.
    ssm.StringParameter(self, id=f'{env_name}-db-host',
                        parameter_name=f"/{env_name}/db-host",
                        string_value=db_mysql.cluster_endpoint.hostname)
    ssm.StringParameter(self, id=f'{env_name}-db-name',
                        parameter_name=f"/{env_name}/db-name",
                        string_value=db_name)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Create a regional REST API and publish its URL and id to SSM."""
    super().__init__(scope, id, **kwargs)
    prj_name = self.node.try_get_context("project_name")
    env_name = self.node.try_get_context("env")
    region = core.Aws.REGION  # removed unused ACCOUNT_ID local

    api_gateway = apigw.RestApi(
        self, 'restapi',
        endpoint_types=[apigw.EndpointType.REGIONAL],
        rest_api_name=prj_name + '-service')
    # At least one method is required for the RestApi to deploy successfully.
    api_gateway.root.add_method('ANY')

    ssm.StringParameter(self, 'api-gw',
                        parameter_name='/' + env_name + '/api-gw-url',
                        string_value='https://' + api_gateway.rest_api_id +
                        '.execute-api.' + region + '.amazonaws.com/')
    # BUG FIX: parameter name was missing the '/' separator
    # ('/<env>api-gw-id' instead of '/<env>/api-gw-id').
    ssm.StringParameter(self, 'api-gw-id',
                        parameter_name='/' + env_name + '/api-gw-id',
                        string_value=api_gateway.rest_api_id)
def __init__(self, scope: core.Construct, id: str, **kwargs):
    """Create the shared Lambda execution role and publish its ARN/name to SSM."""
    super().__init__(scope, id, **kwargs)
    env_name = self.node.try_get_context('env')
    # Removed dead commented-out DynamoDB policy block and the (then unused)
    # 'dynamodb-arn' context lookup that only it referenced.

    self.lambda_basic_role = iam.Role(
        self, 'lambdabasicrole',
        assumed_by=iam.ServicePrincipal(service='lambda.amazonaws.com'),
        role_name=f'{env_name}-cdk-lambda-role',
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole')
        ])

    # NOTE(review): broad wildcard grant — consider scoping actions/resources down.
    self.lambda_basic_role.add_to_policy(statement=iam.PolicyStatement(
        actions=['s3:*', 'rds:*'], resources=['*']))

    ssm.StringParameter(self, f'{env_name}-lambdarole-arn-param',
                        parameter_name=f"/{env_name}/lambda-role-arn",
                        string_value=self.lambda_basic_role.role_arn)
    ssm.StringParameter(self, f'{env_name}-lambdarole-name-param',
                        parameter_name=f"/{env_name}/lambda-role-name",
                        string_value=self.lambda_basic_role.role_name)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Set up a Cognito user pool, app client and identity pool; export ids via SSM."""
    super().__init__(scope, id, **kwargs)
    prj_name = self.node.try_get_context("project_name")
    env_name = self.node.try_get_context("env")

    # Password rules enforced on the user pool.
    password_rules = cognito.CfnUserPool.PasswordPolicyProperty(
        minimum_length=10,
        require_lowercase=True,
        require_numbers=True,
        require_symbols=False,
        require_uppercase=True)

    user_pool = cognito.CfnUserPool(
        self, 'cognitouserpool',
        user_pool_name=prj_name + '-user-pool',
        auto_verified_attributes=['email'],
        username_attributes=['email', 'phone_number'],
        schema=[{
            'attributeDataType': 'String',
            'name': 'param1',
            'mutable': True
        }],
        policies=cognito.CfnUserPool.PoliciesProperty(
            password_policy=password_rules))

    user_pool_client = cognito.CfnUserPoolClient(
        self, 'pool-client',
        user_pool_id=user_pool.ref,
        client_name=env_name + '-app-client')

    # Federated identity pool backed by the user pool above; no guest access.
    pool_provider = cognito.CfnIdentityPool.CognitoIdentityProviderProperty(
        client_id=user_pool_client.ref,
        provider_name=user_pool.attr_provider_name)
    identity_pool = cognito.CfnIdentityPool(
        self, 'identitypool',
        identity_pool_name=prj_name + '-identity-pool',
        allow_unauthenticated_identities=False,
        cognito_identity_providers=[pool_provider])

    # Export the ids so application stacks can look them up.
    ssm.StringParameter(self, 'app-id',
                        parameter_name='/' + env_name + '/cognito-app-client-id',
                        string_value=user_pool_client.ref)
    ssm.StringParameter(self, 'user-pool-id',
                        parameter_name='/' + env_name + '/cognito-user-pool-id',
                        string_value=user_pool_client.user_pool_id)
    ssm.StringParameter(self, 'identity-pool-id',
                        parameter_name='/' + env_name + '/cognito-identity-pool-id',
                        string_value=identity_pool.ref)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Create load-testing SSM parameters and Secrets Manager secrets."""
    super().__init__(scope, id, **kwargs)

    # Unnamespaced parameter, exported via a CloudFormation output below.
    users_param = _ssm.StringParameter(
        self,
        "parameter1Id",
        parameter_name="NoOfConCurrentUsers",
        string_value="100",
        description="Load Testing Configuration",
        tier=_ssm.ParameterTier.STANDARD,
    )
    core.CfnOutput(
        self,
        "parameter1Output",
        value=f"{users_param.string_value}",
        description="Number of concurrent users",
    )

    # Hierarchical (namespaced) variants of the same configuration.
    _ssm.StringParameter(
        self,
        "parameter2Id",
        parameter_name="/locus/configuration/NoOfConCurrentUsers",
        string_value="100",
        description="Load Testing Configuration",
        tier=_ssm.ParameterTier.STANDARD,
    )
    _ssm.StringParameter(
        self,
        "parameter3Id",
        parameter_name="/locus/configuration/DurationInSec",
        string_value="300",
        description="Load Testing Configuration",
        tier=_ssm.ParameterTier.STANDARD,
    )

    # Simple secret with a fully generated value.
    db_password_secret = _secretsmanager.Secret(
        self,
        "secret1Id",
        secret_name="cust_db_pass",
        description="Customer DB password",
    )
    core.CfnOutput(
        self,
        "secret1Output",
        value=f"{db_password_secret.secret_value}",
        description="secret 1",
    )

    # Templated secret: fixed username, generated "password" key.
    _secretsmanager.Secret(
        self,
        "secret2Id",
        secret_name="user_kon_attributes",
        description="Templated secret for user data",
        generate_secret_string=_secretsmanager.SecretStringGenerator(
            secret_string_template=json.dumps({"username": "******"}),
            generate_string_key="password",
        ),
    )
def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc,
             lambdasg: ec2.SecurityGroup, bastionsg: ec2.SecurityGroup,
             kmskey, **kwargs) -> None:
    """Provision an Aurora MySQL cluster with a generated secret and SSM exports."""
    super().__init__(scope, id, **kwargs)
    prj_name = self.node.try_get_context("project_name")
    env_name = self.node.try_get_context("env")

    # Username is fixed in the template; Secrets Manager generates the password.
    secret_template = {'username': '******'}
    database_credentials = sm.Secret(
        self, 'db-secret',
        secret_name=env_name + '/rds-secret',
        generate_secret_string=sm.SecretStringGenerator(
            secret_string_template=json.dumps(secret_template),
            generate_string_key='password',
            password_length=12,
            include_space=False,
            exclude_punctuation=True))

    database_name = prj_name + env_name
    aurora_cluster = rds.DatabaseCluster(
        self, 'mysql',
        default_database_name=database_name,
        engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
        engine_version="5.7.12",
        master_user=rds.Login(
            username='******',
            password=database_credentials.secret_value_from_json('password')),
        instance_props=rds.InstanceProps(
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED),
            instance_type=ec2.InstanceType(
                instance_type_identifier="t3.small")),
        instances=1,
        parameter_group=rds.ClusterParameterGroup.from_parameter_group_name(
            self, 'pg-dev',
            parameter_group_name='default.aurora-mysql5.7'),
        kms_key=kmskey,
        removal_policy=core.RemovalPolicy.DESTROY)

    # Only Lambda functions and the bastion host may reach the DB port.
    aurora_cluster.connections.allow_default_port_from(
        lambdasg, "Access from Lambda functions")
    aurora_cluster.connections.allow_default_port_from(
        bastionsg, "Allow from bastion host")

    # SSM Parameters with connection details for downstream stacks.
    ssm.StringParameter(self, 'db-host',
                        parameter_name='/' + env_name + '/db-host',
                        string_value=aurora_cluster.cluster_endpoint.hostname)
    ssm.StringParameter(self, 'db-name',
                        parameter_name='/' + env_name + '/db-name',
                        string_value=prj_name + env_name)
def __init__(self, scope: core.Construct, id: str, s3bucket, acmcert, **kwargs) -> None:
    """Create a CloudFront distribution for the webhosting bucket and export its
    id and URL to SSM.

    :param s3bucket: Name of the existing S3 bucket serving the site.
    :param acmcert: ACM certificate used for the custom domain alias.
    """
    super().__init__(scope, id, **kwargs)
    prj_name = self.node.try_get_context("project_name")
    env_name = self.node.try_get_context("env")

    source_bucket = s3.Bucket.from_bucket_name(self, 's3bucket', s3bucket)

    # SPA-style error handling: map 400/403/404 to the index at "/" with a 200.
    spa_error_responses = [
        cdn.CfnDistribution.CustomErrorResponseProperty(
            error_code=code,
            response_code=200,
            response_page_path="/")
        for code in (400, 403, 404)
    ]

    self.cdn_id = cdn.CloudFrontWebDistribution(
        self, 'webhosting-cdn',
        origin_configs=[
            cdn.SourceConfiguration(
                behaviors=[cdn.Behavior(is_default_behavior=True)],
                origin_path="/build",
                s3_origin_source=cdn.S3OriginConfig(
                    s3_bucket_source=source_bucket,
                    origin_access_identity=cdn.OriginAccessIdentity(
                        self, 'webhosting-origin')))
        ],
        error_configurations=spa_error_responses,
        alias_configuration=cdn.AliasConfiguration(
            acm_cert_ref=acmcert.certificate_arn,
            names=['app.cloudevangelist.ca']))

    # Export distribution details for other stacks (e.g. cache invalidation).
    ssm.StringParameter(self, 'cdn-dist-id',
                        parameter_name='/' + env_name + '/app-distribution-id',
                        string_value=self.cdn_id.distribution_id)
    ssm.StringParameter(self, 'cdn-url',
                        parameter_name='/' + env_name + '/app-cdn-url',
                        string_value='https://' + self.cdn_id.domain_name)
def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc, **kwargs) -> None:
    """Create Lambda/bastion security groups and the Lambda IAM role; export via SSM.

    :param vpc: VPC in which the security groups are created.
    """
    super().__init__(scope, id, **kwargs)
    env_name = self.node.try_get_context('env')  # removed unused prj_name

    self.lambda_sg = ec2.SecurityGroup(
        self, 'lambdasg',
        security_group_name='lambda-sg',
        vpc=vpc,
        description='security group for lambda functions',
        allow_all_outbound=True)

    # NOTE(review): name uses '_' while lambda-sg uses '-' — presumably
    # intentional; confirm before normalizing (renaming replaces the SG).
    self.bastion_sg = ec2.SecurityGroup(self, 'bastion-sg',
                                        security_group_name='bastion_sg',
                                        vpc=vpc,
                                        description='sg for bastion host',
                                        allow_all_outbound=True)
    # SSH open to the world; restrict to a trusted CIDR if possible.
    self.bastion_sg.add_ingress_rule(
        peer=ec2.Peer.any_ipv4(),
        connection=ec2.Port.tcp(22),
        description='SSH Access for bastion host')

    lambda_role = iam.Role(
        self, 'lambda-role',
        assumed_by=iam.ServicePrincipal(service='lambda.amazonaws.com'),
        role_name='lambda-role',
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name=
                'service-role/AWSLambdaBasicExecutionRole')
        ])
    # NOTE(review): broad wildcard grant — consider scoping down.
    lambda_role.add_to_policy(statement=iam.PolicyStatement(
        actions=['s3:*', 'rds:*', 'ec2:*'], resources=['*']))

    # Create SSM params
    ssm.StringParameter(self, 'lambdasg-param',
                        parameter_name=f'/{env_name}/lambda-sg',
                        string_value=self.lambda_sg.security_group_id)
    ssm.StringParameter(self, 'lambdarolearn-param',
                        parameter_name=f'/{env_name}/lambda-role-arn',
                        string_value=lambda_role.role_arn)
    ssm.StringParameter(self, 'lambdarolename-param',
                        parameter_name=f'/{env_name}/lambda-role-name',
                        string_value=lambda_role.role_name)
def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc, **kwargs) -> None:
    """Create Lambda and bastion security groups plus the Lambda role, and
    publish their identifiers as (unprefixed) SSM parameters."""
    super().__init__(scope, id, **kwargs)

    fn_sg = ec2.SecurityGroup(
        self, "LambdaSG",
        vpc=vpc,
        security_group_name="lambda-sg",
        description="security group for lambda functions",
        allow_all_outbound=True)

    self.bastion_host_security_group = ec2.SecurityGroup(
        self, "BastionSG",
        vpc=vpc,
        security_group_name="bastion-host-sg",
        description="security group for bastion host",
        allow_all_outbound=True)
    # SSH is open to any IPv4 source.
    self.bastion_host_security_group.add_ingress_rule(
        ec2.Peer.any_ipv4(), ec2.Port.tcp(22), "SSH Access")

    execution_role = iam.Role(
        self, 'LambdaRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        role_name='lambda-role',
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name=
                'service-role/AWSLambdaBasicExecutionRole')
        ])
    execution_role.add_to_policy(statement=iam.PolicyStatement(
        actions=['s3:*', 'rds:*'], resources=['*']))

    # Export identifiers for consumption by other stacks.
    ssm.StringParameter(self, "lambda-sg-parameter",
                        parameter_name='/lambda-sg',
                        string_value=fn_sg.security_group_id)
    ssm.StringParameter(self, "lambda-rolearn-parameter",
                        parameter_name='/lambda-role-arn',
                        string_value=execution_role.role_arn)
    ssm.StringParameter(self, "lambda-rolename-parameter",
                        parameter_name='/lambda-role-name',
                        string_value=execution_role.role_name)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Create locust load-testing SSM parameters and Secrets Manager secrets,
    surfacing a parameter and a secret as CloudFormation outputs."""
    super().__init__(scope, id, **kwargs)

    # Unnamespaced parameter (also exported as a stack output below).
    concurrent_users = _ssm.StringParameter(
        self, "parameter1",
        parameter_name="NoOfConcurrentUsers",
        string_value="100",
        description="Load Testing Configuration",
        tier=_ssm.ParameterTier.STANDARD  # choose transaction rate
    )

    # Namespaced copies under /locust/configs/.
    _ssm.StringParameter(
        self, "parameter2",
        parameter_name="/locust/configs/NoOfConcurrentUsers",
        string_value="100",
        description="Load Testing Configuration",
        tier=_ssm.ParameterTier.STANDARD)
    _ssm.StringParameter(
        self, "parameter3",
        parameter_name="/locust/configs/DurationInSec",
        string_value="300",
        description="Load Testing Configuration",
        tier=_ssm.ParameterTier.STANDARD)

    customer_db_secret = _secretsmanager.Secret(
        self, "secret1",
        secret_name="cust_db_pass",
        description="Customer DB password")

    # hierarchy of secrets: fixed username, generated "password" key.
    _secretsmanager.Secret(
        self, "secret2",
        secret_name="user_kon_attributes",
        description="A Templated secret for user data",
        generate_secret_string=_secretsmanager.SecretStringGenerator(
            secret_string_template=json.dumps({"username": "******"}),
            generate_string_key="password"))

    core.CfnOutput(self, "param1",
                   description="NoOfConcurrentUser",
                   value=f"{concurrent_users.string_value}")
    core.CfnOutput(self, "secret1Value",
                   description="secret1",
                   value=f"{customer_db_secret.secret_value}")
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    """Create load-testing SSM parameters and Secrets Manager secrets.

    The original body contained bare string-expression statements
    ("Create Secrets & SSM Parameters: ", etc.) which are no-ops; they are
    now a real docstring and comments.
    """
    super().__init__(scope, construct_id, **kwargs)

    # SSM parameters: one unnamespaced, two under /locust/configs/.
    param1 = _ssm.StringParameter(self, "Parameter1",
                                  description="Load testing configuration",
                                  parameter_name="No_Of_Concurrent_Users",
                                  string_value="100",
                                  tier=_ssm.ParameterTier.STANDARD)
    param2 = _ssm.StringParameter(
        self, "Parameter2",
        description="Load testing configuration",
        parameter_name="/locust/configs/No_Of_Concurrent_Users",
        string_value="100",
        tier=_ssm.ParameterTier.STANDARD)
    param3 = _ssm.StringParameter(
        self, "Parameter3",
        description="Load testing configuration",
        parameter_name="/locust/configs/DurationInSec",
        string_value="300",
        tier=_ssm.ParameterTier.STANDARD)

    # Secrets in Secrets Manager: a plain generated secret and a templated one.
    secret1 = _secretsmanager.Secret(self, "Secret1",
                                     description="Customer DB password",
                                     secret_name="Custom_DB_Password")
    templated_secret = _secretsmanager.Secret(
        self, "Secret2",
        description="A Templated secret for user data",
        secret_name="User_Kon_Attributes",
        generate_secret_string=_secretsmanager.SecretStringGenerator(
            secret_string_template=json.dumps({"username": "******"}),
            generate_string_key="password"))

    # Stack outputs.
    output_1 = cdk.CfnOutput(self, "Parameter1Value",
                             description="No_Of_Concurrent_Users",
                             value=f"{param1.string_value}")
    output_2 = cdk.CfnOutput(self, "Secret1Value",
                             value=f"{secret1.secret_value}")
def __init__(self, scope: core.Construct, id: str, s3bucket, **kwargs):
    """Create a CloudFront distribution (no custom domain) for the webhosting
    bucket and export its id and URL to SSM.

    :param s3bucket: Name of the existing S3 bucket serving the site.
    """
    super().__init__(scope, id, **kwargs)
    prj_name = self.node.try_get_context('project_name')
    env_name = self.node.try_get_context('env')

    source_bucket = s3.Bucket.from_bucket_name(self, 's3bucket', s3bucket)

    # SPA-style error handling: 400/403/404 all serve "/" with a 200.
    spa_error_responses = [
        cdn.CfnDistribution.CustomErrorResponseProperty(
            error_code=code,
            response_code=200,
            response_page_path='/')
        for code in (400, 403, 404)
    ]

    distribution = cdn.CloudFrontWebDistribution(
        self, 'webhosting-cdn',
        origin_configs=[
            cdn.SourceConfiguration(
                behaviors=[cdn.Behavior(is_default_behavior=True)],
                origin_path='/build',
                s3_origin_source=cdn.S3OriginConfig(
                    s3_bucket_source=source_bucket,
                    origin_access_identity=cdn.OriginAccessIdentity(
                        self, 'webhosting-origin')))
        ],
        error_configurations=spa_error_responses,
    )

    ## ssm params
    ssm.StringParameter(self, 'cdn-id',
                        parameter_name=f'/{env_name}/cdn-id',
                        string_value=distribution.distribution_id)
    ssm.StringParameter(self, 'cdn-url',
                        parameter_name=f'/{env_name}/cdn-url',
                        string_value=f'https://{distribution.distribution_domain_name}')
def make_ssm_parameter(self, base_param_name: str, param_value: str, description: str):
    '''
    Creates and tags an SSM Parameter

    Parameters
    ----------
    base_param_name : str
        The base parameter name, without the application namespace prefix
        (the stored name is "<APPLICATION_PREFIX_UPPER>_<base_param_name>")
    param_value : str
        parameter value
    description : str
        parameter description

    Returns
    -------
    ssm.StringParameter
        The created and tagged parameter construct.
    '''
    param = ssm.StringParameter(
        self, base_param_name,
        parameter_name="%s_%s" % (self.APPLICATION_PREFIX.upper(), base_param_name),
        string_value=param_value,
        description=description
        # ,type=ssm.ParameterType.SECURE_STRING
    )
    # Apply the project's standard tagging scheme to the new parameter.
    util.tag_resource(param, base_param_name, description)
    return param
def __init__(self, scope: core.Construct, id: str, *, prefix: str,
             environment: str, configuration, **kwargs):
    """
    :param scope: Stack class, used by CDK.
    :param id: ID of the construct, used by CDK.
    :param prefix: Prefix of the construct, used for naming purposes.
    :param environment: Environment of the construct, used for naming purposes.
    :param configuration: Configuration of the construct. In this case SSM_PARAMETER_STRING.
    :param kwargs: Other parameters that could be used by the construct.
    """
    super().__init__(scope, id, **kwargs)
    self.prefix = prefix
    self.environment_ = environment
    self._configuration = configuration

    # Validating that the payload passed is correct
    validate_configuration(
        configuration_schema=SSM_PARAMETER_STRING_SCHEMA,
        configuration_received=self._configuration)

    # The same string is used for both the construct id and the parameter name.
    full_name = ("/" + self.prefix + "/" + self.environment_ + "/" +
                 self._configuration["name"] + "/appConfig")
    self._parameter_string = ssm.StringParameter(
        self,
        id=full_name,
        parameter_name=full_name,
        description=self._configuration.get("description"),
        string_value=json.dumps(self._configuration["string_value"]),
        type=ssm.ParameterType.STRING,
    )
def __init__(self, scope: Construct, id: str, connection_name: str, *,
             host_arn: str = None, ssm_parameter_space: str = '/github/connections',
             **connection_args) -> None:
    """Create a CodeStar GitHub connection; output and (optionally) store its ARN.

    :param connection_name: Name for the CodeStar connection.
    :param host_arn: Optional CodeStar host ARN (for GitHub Enterprise).
    :param ssm_parameter_space: SSM prefix for the ARN parameter; falsy disables it.
    """
    super().__init__(scope, id)

    if host_arn:
        connection_args['host_arn'] = host_arn

    self.connection = aws_codestarconnections.CfnConnection(
        self, 'github-connection',
        connection_name=connection_name,
        provider_type='GitHub',
        **connection_args)

    connection_arn = self.connection.attr_connection_arn
    CfnOutput(
        self, "output",
        value=connection_arn,
        description=
        "Validate with Github app connection at: https://console.aws.amazon.com/codesuite/settings/connections"
    )

    # Persist the ARN so pipelines can look it up without a stack export.
    if ssm_parameter_space:
        aws_ssm.StringParameter(
            self, "ssm",
            parameter_name=f"{ssm_parameter_space}/{connection_name}",
            string_value=connection_arn)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Create a 2-AZ VPC with public/private/isolated subnets and export the
    private subnet ids as SSM parameters."""
    super().__init__(scope, id, **kwargs)

    self.vpc = ec2.Vpc(
        self, "Vpc",
        cidr='10.0.0.0/16',
        max_azs=2,
        enable_dns_hostnames=True,
        enable_dns_support=True,
        nat_gateways=1,
        subnet_configuration=[
            ec2.SubnetConfiguration(name="PublicSubnet",
                                    subnet_type=ec2.SubnetType.PUBLIC,
                                    cidr_mask=24),
            ec2.SubnetConfiguration(name="PrivateSubnet",
                                    subnet_type=ec2.SubnetType.PRIVATE,
                                    cidr_mask=24),
            ec2.SubnetConfiguration(name="IsolatedSubnet",
                                    subnet_type=ec2.SubnetType.ISOLATED,
                                    cidr_mask=24)
        ])

    # One SSM parameter per private subnet, numbered from 1.
    for index, subnet in enumerate(self.vpc.private_subnets, start=1):
        ssm.StringParameter(self, "private-subnet-" + str(index),
                            string_value=subnet.subnet_id,
                            parameter_name='/private-subnet-' + str(index))
def __init__(self, scope: core.Construct, _id: str, bucket_para, **kwargs) -> None:
    """Create the transfer job's DynamoDB table, SQS queue (+DLQ) and SSM
    parameters.

    :param bucket_para: Bucket configuration serialized into an SSM parameter.
    """
    super().__init__(scope, _id, **kwargs)

    # File-tracking table keyed by object Key, on-demand billing.
    self.ddb_file_list = ddb.Table(
        self, "ddb",
        table_name=table_queue_name,
        partition_key=ddb.Attribute(name="Key", type=ddb.AttributeType.STRING),
        billing_mode=ddb.BillingMode.PAY_PER_REQUEST)

    # DLQ must exist before the main queue references it.
    self.sqs_queue_DLQ = sqs.Queue(
        self, "sqs_DLQ",
        queue_name=table_queue_name + "-DLQ",
        visibility_timeout=core.Duration.hours(1),
        retention_period=core.Duration.days(14))

    # Messages move to the DLQ after 24 failed receives.
    self.sqs_queue = sqs.Queue(
        self, "sqs_queue",
        queue_name=table_queue_name,
        visibility_timeout=core.Duration.hours(1),
        retention_period=core.Duration.days(14),
        dead_letter_queue=sqs.DeadLetterQueue(
            max_receive_count=24,
            queue=self.sqs_queue_DLQ))

    # Bucket configuration published as JSON for the worker processes.
    self.ssm_bucket_para = ssm.StringParameter(
        self, "para-bucket",
        string_value=json.dumps(bucket_para),
        parameter_name=ssm_parameter_bucket)

    # You need to manually set up ssm_credential_para in SSM Parameter Store
    # before deploying this CDK stack. Imported here as a SecureString —
    # MIND THE VERSION NUMBER, IT MUST MATCH EXACTLY!
    self.ssm_credential_para = ssm.StringParameter.from_secure_string_parameter_attributes(
        self, "ssm_parameter_credentials",
        parameter_name=ssm_parameter_credentials,
        version=2)
def __init__(
    self,
    scope: Construct,
    construct_id: str,
    *,
    deploy_env: str,
    parameter_name: ParameterName,
    sort_key: Optional[aws_dynamodb.Attribute] = None,
):
    """Create a DynamoDB table (string "pk" partition key, optional sort key,
    PITR enabled, on-demand billing) and publish its name to SSM.

    :param deploy_env: Deployment environment, used in the parameter's construct id.
    :param parameter_name: Enum member whose value is the SSM parameter name.
    :param sort_key: Optional sort key attribute for the table.
    """
    partition = aws_dynamodb.Attribute(
        name="pk", type=aws_dynamodb.AttributeType.STRING)
    super().__init__(
        scope,
        construct_id,
        partition_key=partition,
        sort_key=sort_key,
        point_in_time_recovery=True,
        removal_policy=REMOVAL_POLICY,
        billing_mode=aws_dynamodb.BillingMode.PAY_PER_REQUEST,
    )

    # Expose the generated table name so other stacks can resolve it at runtime.
    self.name_parameter = aws_ssm.StringParameter(
        self,
        f"{construct_id} table name for {deploy_env}",
        parameter_name=parameter_name.value,
        string_value=self.table_name,
    )
def __init__(self, scope: core.Construct, id: str, idp_name: str, idp_url: str, *, cfn_lambda: str = None, cfn_resources_path: str = None, debug=False):
    """Create an IAM SAML Identity Provider

    Args:
        scope (core.Construct): [description]
        id (str): [description]
        idp_name (str): IAM Idp name
        idp_url (str): Your SAML Identity provider URL
        cfn_lambda (str, optional): Path to the custom-resource handler source;
            defaults to <package dir>/lambdas/iam_idp/saml.py.
        cfn_resources_path (str, optional): Path to the cfn_resources layer zip;
            defaults to <sys.prefix>/share/aviv-cdk/iam-idp/artifacts-cfn_resources.zip.
        debug (bool, optional): Currently unused here — presumably consumed by
            the CDKLambda base class; confirm.
    """
    # Default artifact location shipped with the installed package.
    rdir = sys.prefix + '/share/aviv-cdk/iam-idp/'
    if not cfn_lambda:
        # Fall back to the handler bundled two directories above this module.
        p = os.path.dirname(os.path.dirname(__file__))
        cfn_lambda = p + '/lambdas/iam_idp/saml.py'
    # Handler source is inlined into the Lambda definition.
    lambda_attrs = dict(code=aws_lambda.InlineCode(
        CDKLambda._code_inline(cfn_lambda)),
                        handler='index.handler',
                        timeout=core.Duration.seconds(20),
                        runtime=aws_lambda.Runtime.PYTHON_3_7)
    if not cfn_resources_path:
        cfn_resources_path = rdir + 'artifacts-cfn_resources.zip'
    layer_attrs = dict(description='cfn_resources layer for idp',
                       code=aws_lambda.AssetCode(cfn_resources_path))
    # Base class creates self._lambda from lambda_attrs/layer_attrs.
    super().__init__(scope, id, lambda_attrs=lambda_attrs, layer_attrs=layer_attrs, remote_account_grant=False)
    # Add required policies for the lambda to create an IAM idp
    self._lambda.add_to_role_policy(
        iam.PolicyStatement(actions=[
            'iam:CreateSAMLProvider', 'iam:UpdateSAMLProvider',
            'iam:DeleteSAMLProvider'
        ],
                            effect=iam.Effect.ALLOW,
                            resources=['*']))
    # Custom resource backed by the Lambda above manages the SAML provider.
    self._idp = cfn.CustomResource(
        self, "identityProvider",
        resource_type='Custom::SAMLProvider',
        provider=cfn.CustomResourceProvider.lambda_(self._lambda),
        properties=dict(Name=idp_name, URL=idp_url))
    self.response = self._idp.get_att("Response").to_string()
    # Export: SSM name is derived from the construct id ('-' -> '/').
    ssm_name = '/' + id.replace('-', '/')
    ssm.StringParameter(self, 'ssm',
                        string_value=self._idp.ref,
                        parameter_name=ssm_name)
    core.CfnOutput(self, 'IAMIdpSAMLArn', value=self._idp.ref)
def __init__(self, scope: core.Stack, id: str,
             ssm_parameters_file: str = 'ssm_parameters.json',
             ssm_parameters_dir: str = 'ssm_parameters',
             lifecycle: str = 'lifecycle',
             **kwargs) -> None:
    """Adds parameters to SSM parameter store from json files.

    (Docstring was previously a no-op string after super().__init__;
    it is now the method's real docstring, with typos fixed.)

    :param ssm_parameters_file: Json file with parameters. (Deprecated, unused)
    :param ssm_parameters_dir: Directory with json files containing parameters.
    :param lifecycle: The name (value) of the prefix for adding to key.
        It will not be added if the ${lifecycle} key and value are absent
        in the json file. For instance if lifecycle = 'lifecycle', then for
        adding a prefix to a key, the json file must contain
        "lifecycle": "dev1". Elif lifecycle = 'environment', then the json
        file must contain "environment": "dev1".
    """
    super().__init__(scope, id, **kwargs)

    for filename in os.listdir(ssm_parameters_dir):
        if not filename.endswith('.json'):
            continue
        with open(os.path.join(ssm_parameters_dir, filename), 'rb') as f:
            ssm_parameters = json.load(f)
        for key, value in ssm_parameters.items():
            if key == lifecycle:
                # The lifecycle marker itself is not a parameter.
                continue
            if lifecycle in ssm_parameters:
                # NOTE(review): assumes keys start with '/' — otherwise the
                # result is '/<lifecycle><key>' with no separator; confirm.
                key = '/' + ssm_parameters[lifecycle] + key
            _ssm.StringParameter(
                self,
                key,  # was f'{key}' — f-string around an existing str
                string_value=value,
                parameter_name=key)
def __init__(
    self,
    scope: Construct,
    id: str,
    context: "Context",
    team_context: "TeamContext",
    parameters: Dict[str, Any],
) -> None:
    """Demo plugin stack: tags itself and writes a hello-world SSM parameter.

    :param context: Orbit environment context (account, region, name).
    :param team_context: Team context providing the team name.
    :param parameters: Plugin parameters (logged only).
    """
    super().__init__(
        scope=scope,
        id=id,
        stack_name=id,
        env=Environment(account=context.account_id, region=context.region),
    )
    Tags.of(scope=cast(IConstruct, self)).add(
        key="Env", value=f"orbit-{context.name}")
    _logger.info(f"Plugin parameters: {parameters}")

    # just showing how to create resource. Do not forget to update the IAM
    # policy or make sure the attached policy for the team is allowing the
    # creation and destruction of the resource.
    parameter_name: str = f"/orbit/{context.name}/{team_context.name}/hello-plugin"
    ssm.StringParameter(
        scope=self,
        id="param",
        parameter_name=parameter_name,
        string_value="testing plugin hello world",
    )
def __init__(self, scope: core.Construct, id: str, shard_count: int, **kwargs) -> None:
    """Create the Kinesis data stream and publish its name to SSM.

    :param shard_count: Accepted for interface compatibility; the stream is
        currently pinned to a single shard (see below).
    """
    super().__init__(scope, id, **kwargs)

    ##############################################
    #######   CDK STABILITY EXPERIMENTAL   #######
    ##############################################
    # Stream is hard-wired to one shard regardless of the shard_count argument.
    self.kinesis_data_pipe = _kinesis.Stream(
        self, "dataPipe",
        stream_name="data_pipe",
        shard_count=1)

    # Stream name is published for consumers/producers to discover.
    self.data_pipe_ssm_param = _ssm.StringParameter(
        self, "dataPipeParameter",
        description="Kinesis Stream Name",
        parameter_name=
        f"/{global_args.REPO_NAME}/streams/data_pipe/stream_name",
        string_value=f"{self.kinesis_data_pipe.stream_name}")

    core.CfnOutput(
        self, "AutomationFrom",
        value=f"{global_args.SOURCE_INFO}",
        description=
        "To know more about this automation stack, check out our github page."
    )
def __init__(self, scope: Construct, stack_id: str, *, deploy_env: str,
             **kwargs: Any) -> None:
    """Staging layer: private, versioned S3 bucket plus an SSM parameter
    exposing its name."""
    super().__init__(scope, stack_id, **kwargs)

    ############################################################################################
    # ### DATASET STAGING S3 BUCKET ############################################################
    ############################################################################################
    self.staging_bucket = aws_s3.Bucket(
        self,
        "dataset-staging-bucket",
        versioned=True,
        access_control=aws_s3.BucketAccessControl.PRIVATE,
        block_public_access=aws_s3.BlockPublicAccess.BLOCK_ALL,
        removal_policy=RemovalPolicy.DESTROY,
    )

    # Bucket name is generated, so publish it for other stacks to resolve.
    self.staging_bucket_name_parameter = aws_ssm.StringParameter(
        self,
        "staging bucket name",
        parameter_name=ParameterName.STAGING_BUCKET_NAME.value,
        string_value=self.staging_bucket.bucket_name,
        description=f"Staging bucket name for {deploy_env}",
    )

    Tags.of(self).add("ApplicationLayer", "staging")  # type: ignore[arg-type]
def _create_manifest_parameter(self) -> ssm.StringParameter:
    """Serialize the environment's key resource identifiers into one SSM
    parameter at self.context.env_ssm_parameter_name and return it."""
    # Gather everything remote tooling needs to attach to this environment.
    manifest = {
        "EksClusterRoleArn": self.role_eks_cluster.role_arn,
        "EksFargateProfileRoleArn": self.role_fargate_profile.role_arn,
        "EksEnvNodegroupRoleArn": self.role_eks_env_nodegroup.role_arn,
        "EksClusterAutoscalerRoleArn": self.role_cluster_autoscaler.role_arn,
        "UserPoolId": self.user_pool.user_pool_id,
        "UserPoolClientId": self.user_pool_client.user_pool_client_id,
        "IdentityPoolId": self.identity_pool.ref,
        "ClusterPodSecurityGroupId":
        self.cluster_pod_security_group.security_group_id,
    }
    return ssm.StringParameter(
        scope=self,
        id="/orbit/EnvParams",
        string_value=json.dumps(manifest),
        type=ssm.ParameterType.STRING,
        description="Orbit Workbench Remote Env.",
        parameter_name=self.context.env_ssm_parameter_name,
        simple_name=False,
        tier=ssm.ParameterTier.INTELLIGENT_TIERING,
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Create a CLOUDFRONT-scoped WAFv2 web ACL running the AWS managed
    common rule set in count mode, and export the ACL id to SSM."""
    super().__init__(scope, id, **kwargs)
    prj_name = self.node.try_get_context("project_name")
    env_name = self.node.try_get_context("env")

    basic_rule = waf.CfnWebACL.RuleProperty(
        # FIX: was the typo 'AWSManagedCommonRue' (note: renaming a WAF rule
        # updates the deployed ACL's rule on next deploy).
        name='AWSManagedCommonRule',
        priority=0,
        statement=waf.CfnWebACL.StatementOneProperty(
            managed_rule_group_statement=waf.CfnWebACL.
            ManagedRuleGroupStatementProperty(
                name='AWSManagedRulesCommonRuleSet', vendor_name='AWS')),
        # Count-only: managed rules observe traffic without blocking.
        override_action=waf.CfnWebACL.OverrideActionProperty(count={}),
        visibility_config=waf.CfnWebACL.VisibilityConfigProperty(
            cloud_watch_metrics_enabled=True,
            metric_name='AWSManagedCommonRule',
            sampled_requests_enabled=True))

    web_acl = waf.CfnWebACL(
        self, 'web-acl-id',
        default_action=waf.CfnWebACL.DefaultActionProperty(allow={}),
        scope='CLOUDFRONT',
        visibility_config=waf.CfnWebACL.VisibilityConfigProperty(
            cloud_watch_metrics_enabled=True,
            metric_name=prj_name + '-' + env_name,
            sampled_requests_enabled=True),
        # FIX: added the missing '-' separator before 'webacl'
        # (was '<prj>-<env>webacl'); note this renames the deployed ACL.
        name=prj_name + '-' + env_name + '-webacl',
        rules=[basic_rule])

    ssm.StringParameter(self, 'web-id-ssm',
                        parameter_name='/' + env_name + '/webacl-id',
                        string_value=web_acl.attr_id)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Create a 2-AZ VPC with public and isolated subnets and export the
    public subnet ids as SSM parameters."""
    super().__init__(scope, id, **kwargs)
    env_name = self.node.try_get_context('env')  # removed unused prj_name

    self.vpc = ec2.Vpc(
        self, 'devVPC',
        cidr="10.10.0.0/16",
        max_azs=2,
        enable_dns_hostnames=True,
        enable_dns_support=True,
        subnet_configuration=[
            ec2.SubnetConfiguration(name='Public',
                                    subnet_type=ec2.SubnetType.PUBLIC,
                                    cidr_mask=24),
            ec2.SubnetConfiguration(name='data',
                                    subnet_type=ec2.SubnetType.ISOLATED,
                                    cidr_mask=24)
        ])

    # One SSM parameter per public subnet, numbered from 1.
    for count, subnet in enumerate(self.vpc.public_subnets, start=1):
        ssm.StringParameter(
            self, f'public-subnet-{str(count)}',
            string_value=subnet.subnet_id,
            parameter_name=f'/{env_name}/public-subnet-{str(count)}')
def __init__(self, scope: core.Construct, id: str, cluster: ecs.ICluster,
             repo: ecr.IRepository, **kwargs) -> None:
    """Scheduled Fargate task (every 5 minutes) running the SFTP image from ECR.

    :param cluster: ECS cluster hosting the scheduled task.
    :param repo: ECR repository holding the task's container image (tag 'latest').
    """
    super().__init__(scope, id, **kwargs)

    # service skeleton: scheduled task running the latest image from the repo.
    sftp_task = ecs_patterns.ScheduledFargateTask(
        scope=self,
        id="SftpTaskDef",
        cluster=cluster,
        desired_task_count=1,
        schedule=applicationautoscaling.Schedule.rate(
            duration=core.Duration.minutes(5)),
        scheduled_fargate_task_image_options=ecs_patterns.
        ScheduledFargateTaskImageOptions(
            image=ecs.ContainerImage.from_ecr_repository(repository=repo,
                                                         tag='latest'),
            cpu=1024,
            memory_limit_mib=2048))

    # Task needs service discovery, secrets and EC2 describe permissions.
    sftp_task.task_definition.task_role.add_to_policy(
        statement=iam.PolicyStatement(
            resources=['*'],
            actions=[
                'servicediscovery:DiscoverInstances', 'secretsmanager:Get*',
                'ec2:Describe*'
            ]))

    # Publish the container name so deploy tooling can target it.
    ssm.StringParameter(
        scope=self,
        id='SSMParamSftpImageName',
        parameter_name='image_sftp',
        string_value=sftp_task.task_definition.default_container.container_name)
def __init__(self, scope: core.Construct, id: str, config: dict, **kwargs):
    """Store the code-server password from config as an SSM string parameter.

    :param config: Mapping providing 'code_server_auth_key' (parameter name)
        and 'code_server_auth_val' (parameter value).
    """
    super().__init__(scope, id, **kwargs)

    # Both the parameter name and its value come from the supplied config.
    self._code_server_password = ssm.StringParameter(
        self,
        "CodeServerPassword",
        string_value=config["code_server_auth_val"],
        parameter_name=config["code_server_auth_key"],
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Create a placeholder SSM string parameter."""
    super().__init__(scope, id, **kwargs)

    # ToDo: Create a SSM command
    placeholder = ssm.StringParameter(
        scope=self,
        id="StringParameter",
        string_value="Initial parameter value",
    )