def create_config(bosslet_config): """Create the CloudFormationConfiguration object.""" config = CloudFormationConfiguration('core', bosslet_config, version="2") session = bosslet_config.session keypair = bosslet_config.SSH_KEY names = bosslet_config.names config.add_vpc() # Create the internal and external subnets config.add_subnet('InternalSubnet', names.internal.subnet) config.add_subnet('ExternalSubnet', names.external.subnet) internal_subnets, external_subnets = config.add_all_subnets() internal_subnets_asg, external_subnets_asg = config.find_all_subnets('asg') # Create a custom resource to help delete ENIs from lambdas # DP NOTE: After deleting a lambda the ENIs may stick around for while, causing the stack delete to fail # See https://stackoverflow.com/a/41310289 config.add_arg(Arg.String('StackName', config.stack_name)) config.add_custom_resource('DeleteENI', 'DeleteENI', Arn('DeleteENILambda'), StackName=Ref('StackName')) config.add_lambda( "DeleteENILambda", names.delete_eni.lambda_, aws.role_arn_lookup(session, 'DeleteENI'), const.DELETE_ENI_LAMBDA, handler="index.handler", timeout= 180, # 3 minutes, so that there is enough time to wait for the ENI detach to complete runtime='python3.6' ) # If the lambda times out CF will retry a couple of times user_data = const.BASTION_USER_DATA.format(bosslet_config.NETWORK) config.add_ec2_instance("Bastion", names.bastion.dns, aws.ami_lookup(bosslet_config, const.BASTION_AMI), keypair, subnet=Ref("ExternalSubnet"), public_ip=True, user_data=user_data, security_groups=[ Ref("InternalSecurityGroup"), Ref("BastionSecurityGroup") ], depends_on="AttachInternetGateway") vault_role = aws.role_arn_lookup(session, 'apl-vault') vault_actions = ['kms:Encrypt', 'kms:Decrypt', 'kms:DescribeKey'] config.add_kms_key("VaultKey", names.vault.key, vault_role, vault_actions) config.add_dynamo_table("VaultTable", names.vault.ddb, attributes=[('Path', 'S'), ('Key', 'S')], key_schema=[('Path', 'HASH'), ('Key', 'RANGE')], throughput=(5, 5)) user_data = UserData() user_data["system"]["fqdn"] = names.vault.dns user_data["system"]["type"] = "vault" user_data["vault"]["kms_key"] = str(Ref("VaultKey")) user_data["vault"]["ddb_table"] = names.vault.ddb parsed_user_data = { "Fn::Join": ["", user_data.format_for_cloudformation()] } config.add_autoscale_group("Vault", names.vault.dns, aws.ami_lookup(bosslet_config, names.vault.ami), keypair, subnets=internal_subnets_asg, type_=const.VAULT_TYPE, security_groups=[Ref("InternalSecurityGroup")], user_data=parsed_user_data, min=const.VAULT_CLUSTER_SIZE, max=const.VAULT_CLUSTER_SIZE, notifications=Ref("DNSSNS"), role=aws.instance_profile_arn_lookup( session, 'apl-vault'), depends_on=[ "VaultKey", "VaultTable", "DNSLambda", "DNSSNS", "DNSLambdaExecute" ]) user_data = UserData() user_data["system"]["fqdn"] = names.auth.dns user_data["system"]["type"] = "auth" deps = [ "AuthSecurityGroup", "AttachInternetGateway", "DNSLambda", "DNSSNS", "DNSLambdaExecute" ] # Problem: If development scenario uses a local DB. If the auth server crashes # and is auto restarted by the autoscale group then the new auth server # will not have any of the previous configuration, because the old DB # was lost. Using an RDS for development fixes this at the cost of having # the core config taking longer to launch. 
    USE_DB = bosslet_config.AUTH_RDS
    if USE_DB:
        deps.append("AuthDB")
        user_data["aws"]["db"] = "keycloak"  # flag for init script for which config to use

    cert = aws.cert_arn_lookup(session, names.public_dns('auth'))
    create_asg_elb(config,
                   "Auth",
                   names.auth.dns,
                   aws.ami_lookup(bosslet_config, names.auth.ami),
                   keypair,
                   str(user_data),
                   const.AUTH_CLUSTER_SIZE,
                   internal_subnets_asg,
                   external_subnets_asg,
                   [("443", "8080", "HTTPS", cert)],
                   "HTTP:8080/index.html",
                   sgs=[Ref("AuthSecurityGroup")],
                   type_=const.AUTH_TYPE,
                   depends_on=deps)
    config.add_public_dns('AuthLoadBalancer', names.public_dns('auth'))

    if USE_DB:
        config.add_rds_db("AuthDB",
                          names.auth_db.rds,
                          "3306",
                          "keycloak",
                          "keycloak",
                          "keycloak",
                          internal_subnets,
                          type_="db.t2.micro",
                          security_groups=[Ref("InternalSecurityGroup")])

    config.add_lambda("DNSLambda",
                      names.dns.lambda_,
                      aws.role_arn_lookup(session, 'UpdateRoute53'),
                      const.DNS_LAMBDA,
                      handler="index.handler",
                      timeout=10,
                      depends_on="DNSZone")

    config.add_lambda_permission("DNSLambdaExecute", Ref("DNSLambda"))

    config.add_sns_topic("DNSSNS",
                         names.dns.sns,
                         names.dns.sns,
                         [("lambda", Arn("DNSLambda"))])

    config.add_security_group("InternalSecurityGroup",
                              names.internal.sg,
                              [("-1", "-1", "-1", bosslet_config.NETWORK)])

    # Allow SSH access to bastion from anywhere
    incoming_subnet = bosslet_config.SSH_INBOUND
    config.add_security_group("BastionSecurityGroup",
                              names.ssh.sg,
                              [("tcp", "22", "22", incoming_subnet)])

    incoming_subnet = bosslet_config.HTTPS_INBOUND
    # Allow requests from the endpoint via the NAT gateway
    # Needed in case HTTPS_INBOUND doesn't include the gateway's IP
    boss_subnet = { "Fn::Join": ["/", [Ref("NATIP"), "32"]] }
    config.add_security_group("AuthSecurityGroup",
                              #names.https.sg, DP XXX: hack until we can get production updated correctly
                              names.auth.sg,
                              [("tcp", "443", "443", incoming_subnet),
                               ("tcp", "443", "443", boss_subnet)])

    # Create the internal route table to route traffic to the NAT Bastion
    all_internal_subnets = internal_subnets.copy()
    all_internal_subnets.append(Ref("InternalSubnet"))
    config.add_route_table("InternalRouteTable",
                           names.internal.rt,
                           subnets=all_internal_subnets)

    config.add_route_table_route("InternalNatRoute",
                                 Ref("InternalRouteTable"),
                                 nat=Ref("NAT"),
                                 depends_on="NAT")

    # Create the internet gateway and internet router
    all_external_subnets = external_subnets.copy()
    all_external_subnets.append(Ref("ExternalSubnet"))
    config.add_route_table("InternetRouteTable",
                           names.internet.rt,
                           subnets=all_external_subnets)

    config.add_route_table_route("InternetRoute",
                                 Ref("InternetRouteTable"),
                                 gateway=Ref("InternetGateway"),
                                 depends_on="AttachInternetGateway")

    config.add_internet_gateway("InternetGateway", names.internet.gw)
    config.add_endpoint("S3Endpoint", "s3",
                        [Ref("InternalRouteTable"), Ref('InternetRouteTable')])
    config.add_endpoint("DynamoDBEndpoint", "dynamodb",
                        [Ref("InternalRouteTable"), Ref('InternetRouteTable')])
    config.add_nat("NAT", Ref("ExternalSubnet"), depends_on="AttachInternetGateway")

    return config
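
# Illustrative sketch (not part of the original configs): both the Vault autoscale
# group above and the cachedb config below wrap UserData in a CloudFormation
# Fn::Join so that intrinsic values such as Ref("VaultKey") embedded in the user
# data are resolved when the stack is created. A minimal helper capturing that
# pattern, assuming UserData.format_for_cloudformation() returns the list of
# string fragments and intrinsic-function dicts to join:
def _as_cf_user_data(user_data):
    """Join UserData fragments so CloudFormation evaluates embedded intrinsics."""
    return {"Fn::Join": ["", user_data.format_for_cloudformation()]}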
def create_config(bosslet_config, user_data=None):
    """
    Create the CloudFormationConfiguration object.

    Args:
        bosslet_config (BossConfiguration): target bosslet
        user_data (UserData): information used by the endpoint instance and vault.
                              Data will be run through the CloudFormation Fn::Join
                              template intrinsic function, so other template
                              intrinsic functions used in the user_data will be
                              parsed and executed.

    Returns:
        the config for the CloudFormation stack
    """
    # Prepare user data for parsing by CloudFormation.
    if user_data is not None:
        parsed_user_data = { "Fn::Join": ["", user_data.format_for_cloudformation()] }
    else:
        parsed_user_data = user_data

    keypair = bosslet_config.SSH_KEY
    session = bosslet_config.session
    names = bosslet_config.names

    config = CloudFormationConfiguration("cachedb", bosslet_config)
    vpc_id = config.find_vpc()

    #####
    # TODO: When CF config files are refactored for multi-account support
    #       the creation of _all_ subnets should be moved into core.
    #       AWS doesn't charge for the VPC or subnets, so it doesn't
    #       increase cost and cleans up subnet creation

    # Create several subnets for all the lambdas to use.
    internal_route_table_id = aws.rt_lookup(session, vpc_id, names.internal.rt)

    lambda_subnets = config.add_all_lambda_subnets()
    for lambda_subnet in lambda_subnets:
        key = lambda_subnet['Ref']
        config.add_route_table_association(key + "RTA",
                                           internal_route_table_id,
                                           lambda_subnet)

    # Create a custom resource to help delete ENIs from lambdas
    # DP NOTE: After deleting a lambda the ENIs may stick around for a while, causing the stack delete to fail
    #          See https://stackoverflow.com/a/41310289
    config.add_arg(Arg.String('StackName', config.stack_name))
    config.add_arg(Arg.String('DeleteENILambda',
                              aws.lambda_arn_lookup(session, names.delete_eni.lambda_)))
    config.add_custom_resource('DeleteENI', 'DeleteENI', Ref('DeleteENILambda'),
                               StackName=Ref('StackName'))

    # Look up the Internal Subnet and Internal Security Group IDs that are
    # needed by other resources
    internal_subnet_id = aws.subnet_id_lookup(session, names.internal.subnet)
    config.add_arg(Arg.Subnet("InternalSubnet",
                              internal_subnet_id,
                              "ID of Internal Subnet to create resources in"))

    internal_sg_id = aws.sg_lookup(session, vpc_id, names.internal.sg)
    config.add_arg(Arg.SecurityGroup("InternalSecurityGroup",
                                     internal_sg_id,
                                     "ID of internal Security Group"))

    role = aws.role_arn_lookup(session, "lambda_cache_execution")
    config.add_arg(Arg.String("LambdaCacheExecutionRole", role,
                              "IAM role for " + names.multi_lambda.lambda_))

    cuboid_import_role = aws.role_arn_lookup(session, CUBOID_IMPORT_ROLE)
    config.add_arg(Arg.String(CUBOID_IMPORT_ROLE, cuboid_import_role,
                              "IAM role for cuboidImport"))

    config.add_capabilities(['CAPABILITY_IAM'])

    cuboid_bucket_name = names.cuboid_bucket.s3
    if not aws.s3_bucket_exists(session, cuboid_bucket_name):
        config.add_s3_bucket("cuboidBucket", cuboid_bucket_name)

    config.add_s3_bucket_policy(
        "cuboidBucketPolicy", cuboid_bucket_name,
        ['s3:GetObject', 's3:PutObject'],
        { 'AWS': role })
    config.append_s3_bucket_policy(
        "cuboidBucketPolicy", cuboid_bucket_name,
        ['s3:PutObject'],
        { 'AWS': cuboid_import_role })

    delete_bucket_name = names.delete_bucket.s3
    if not aws.s3_bucket_exists(session, delete_bucket_name):
        config.add_s3_bucket("deleteBucket", delete_bucket_name)
    config.add_s3_bucket_policy(
        "deleteBucketPolicy", delete_bucket_name,
        ['s3:GetObject', 's3:PutObject'],
        { 'AWS': role })

    tile_bucket_name = names.tile_bucket.s3
    if not aws.s3_bucket_exists(session, tile_bucket_name):
        life_cycle_cfg = get_cf_bucket_life_cycle_rules()
config.add_s3_bucket( "tileBucket", tile_bucket_name, life_cycle_config=life_cycle_cfg) config.add_s3_bucket_policy( "tileBucketPolicy", tile_bucket_name, ['s3:GetObject', 's3:PutObject'], { 'AWS': role}) # The ingest bucket is a staging area for cuboids uploaded during volumetric ingest. creating_ingest_bucket = False ingest_bucket_name = names.ingest_bucket.s3 if not aws.s3_bucket_exists(session, ingest_bucket_name): creating_ingest_bucket = True ing_bucket_life_cycle_cfg = get_cf_bucket_life_cycle_rules() config.add_s3_bucket("ingestBucket", ingest_bucket_name, life_cycle_config=ing_bucket_life_cycle_cfg) config.add_s3_bucket_policy( "ingestBucketPolicy", ingest_bucket_name, ['s3:GetObject', 's3:PutObject', 's3:PutObjectTagging'], { 'AWS': cuboid_import_role}) config.add_ec2_instance("CacheManager", names.cachemanager.dns, aws.ami_lookup(bosslet_config, names.cachemanager.ami), keypair, subnet=Ref("InternalSubnet"), public_ip=False, type_=const.CACHE_MANAGER_TYPE, security_groups=[Ref("InternalSecurityGroup")], user_data=parsed_user_data, role="cachemanager") config.add_sqs_queue( names.ingest_cleanup_dlq.sqs, names.ingest_cleanup_dlq.sqs, 30, 20160) config.add_sqs_queue( names.cuboid_import_dlq.sqs, names.cuboid_import_dlq.sqs, 30, 20160) config.add_sqs_policy('cuboidImportDlqPolicy', 'cuboidImportDlqPolicy', [Ref(names.cuboid_import_dlq.sqs)], cuboid_import_role) config.add_lambda("MultiLambda", names.multi_lambda.lambda_, Ref("LambdaCacheExecutionRole"), handler='lambda_loader.handler', timeout=120, memory=1536, security_groups=[Ref('InternalSecurityGroup')], subnets=lambda_subnets) config.add_lambda("TileUploadedLambda", names.tile_uploaded.lambda_, Ref("LambdaCacheExecutionRole"), handler='tile_uploaded_lambda.handler', timeout=5, memory=1024) config.add_lambda("TileIngestLambda", names.tile_ingest.lambda_, Ref("LambdaCacheExecutionRole"), handler='tile_ingest_lambda.handler', timeout=30, memory=1536) config.add_lambda("DeleteTileObjsLambda", names.delete_tile_objs.lambda_, Ref("LambdaCacheExecutionRole"), handler='delete_tile_objs_lambda.handler', timeout=90, memory=128, dlq=Arn(names.ingest_cleanup_dlq.sqs)) config.add_lambda("DeleteTileEntryLambda", names.delete_tile_index_entry.lambda_, Ref("LambdaCacheExecutionRole"), handler='delete_tile_index_entry_lambda.handler', timeout=90, memory=128, dlq=Arn(names.ingest_cleanup_dlq.sqs)) config.add_lambda("CuboidImportLambda", names.cuboid_import_lambda.lambda_, Ref(CUBOID_IMPORT_ROLE), handler='cuboid_import_lambda.handler', timeout=90, memory=128, dlq=Arn(names.cuboid_import_dlq.sqs)) config.add_lambda("VolumetricIngestLambda", names.volumetric_ingest_queue_upload_lambda.lambda_, Ref("LambdaCacheExecutionRole"), handler='ingest_queue_upload_volumetric_lambda.handler', timeout=120, memory=1024) if creating_ingest_bucket: config.add_lambda_permission( 'ingestBucketInvokeCuboidImportLambda', names.cuboid_import_lambda.lambda_, principal='s3.amazonaws.com', source={ 'Fn::Join': [':', ['arn', 'aws', 's3', '', '', ingest_bucket_name]]}, #DP TODO: move into constants depends_on=['ingestBucket', 'CuboidImportLambda'] ) else: # NOTE: this permission doesn't seem to apply properly when doing a # CloudFormation update. During testing, I had to manually apply this # permission before the bucket trigger could be applied in post_init(). # Doing a CloudFormation delete followed by a create did not have a # problem. 
        config.add_lambda_permission(
            'ingestBucketInvokeCuboidImportLambda', names.cuboid_import_lambda.lambda_,
            principal='s3.amazonaws.com',
            source={
                'Fn::Join': [':', ['arn', 'aws', 's3', '', '', ingest_bucket_name]]},
            depends_on='CuboidImportLambda'
        )

    # Add a topic indicating that the object store has been write locked.
    # Now using the "production mailing list" instead of a separate write lock topic.
    #config.add_sns_topic('WriteLock',
    #                     names.write_lock_topic,
    #                     names.write_lock,
    #                     []) # TODO: add subscribers

    return config
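
# Illustrative usage (hypothetical, not part of the original module): a caller
# would typically build the UserData passed to create_config() so the CacheManager
# instance can configure itself on boot. The exact keys shown are assumptions
# modeled on the core config above.
#
#     user_data = UserData()
#     user_data["system"]["fqdn"] = bosslet_config.names.cachemanager.dns
#     user_data["system"]["type"] = "cachemanager"
#     config = create_config(bosslet_config, user_data)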