def create_config(session, domain):
    """Create the CloudFormationConfiguration object for the activities stack.

    Builds, in order:
      * a scheduled CloudWatch event rule ("DeleteEventRule") that fires the
        multi lambda with delete-related resource names as its JSON payload,
      * the "Activities" autoscale group (min=1, max=1), configured through
        UserData,
      * the ingest lambda ("IngestLambda") and its invoke permission.

    Args:
        session: boto3-style session used for all AWS lookups.
        domain: VPC domain name; used to derive all resource names.

    Returns:
        The populated CloudFormationConfiguration.
    """
    config = CloudFormationConfiguration('activities', domain, const.REGION)
    names = AWSNames(domain)

    # NOTE(review): keypair is deliberately written to a module-level global;
    # other functions in this file presumably read it -- confirm before
    # refactoring the global away.
    global keypair
    keypair = aws.keypair_lookup(session)

    vpc_id = config.find_vpc(session)
    sgs = aws.sg_lookup_all(session, vpc_id)

    # Only the lambda-compatible subnets are consumed below.  The original
    # code also bound the unrestricted lookup to an unused local
    # (internal_subnets); the call is kept in case the lookup caches state,
    # but its result is discarded.
    config.find_all_availability_zones(session)
    internal_subnets_lambda, _ = config.find_all_availability_zones(
        session, lambda_compatible_only=True)

    topic_arn = aws.sns_topic_lookup(session, "ProductionMicronsMailingList")

    # Payload handed to the delete lambda on every scheduled firing.
    event_data = {
        "lambda-name": "delete_lambda",
        "db": names.endpoint_db,
        "meta-db": names.meta,
        "s3-index-table": names.s3_index,
        "id-index-table": names.id_index,
        "id-count-table": names.id_count_index,
        "cuboid_bucket": names.cuboid_bucket,
        "delete_bucket": names.delete_bucket,
        "topic-arn": topic_arn,
        "query-deletes-sfn-name": names.query_deletes,
        "delete-sfn-name": names.delete_cuboid,
        "delete-exp-sfn-name": names.delete_experiment,
        "delete-coord-frame-sfn-name": names.delete_coord_frame,
        "delete-coll-sfn-name": names.delete_collection,
    }

    role_arn = aws.role_arn_lookup(session, "events_for_delete_lambda")
    multi_lambda = names.multi_lambda
    lambda_arn = aws.lambda_arn_lookup(session, multi_lambda)
    target_list = [{
        "Arn": lambda_arn,
        "Id": multi_lambda,
        "Input": json.dumps(event_data)
    }]

    # Fire hourly 06:00-11:00 UTC, Tuesday through Friday.
    schedule_expression = "cron(1 6-11/1 ? * TUE-FRI *)"
    #schedule_expression = "cron(0/2 * * * ? *)"  # testing fire every two minutes

    config.add_event_rule("DeleteEventRule",
                          names.delete_event_rule,
                          role_arn=role_arn,
                          schedule_expression=schedule_expression,
                          target_list=target_list,
                          description=None)

    # Events have to be given permission to run lambda.
    config.add_lambda_permission('DeleteRulePerm',
                                 multi_lambda,
                                 principal='events.amazonaws.com',
                                 source=Arn('DeleteEventRule'))

    # UserData consumed by the activities instance at boot.
    user_data = UserData()
    user_data["system"]["fqdn"] = names.activities
    user_data["system"]["type"] = "activities"
    user_data["aws"]["db"] = names.endpoint_db
    user_data["aws"]["cache"] = names.cache
    user_data["aws"]["cache-state"] = names.cache_state
    user_data["aws"]["cache-db"] = "0"
    user_data["aws"]["cache-state-db"] = "0"
    user_data["aws"]["meta-db"] = names.meta
    user_data["aws"]["cuboid_bucket"] = names.cuboid_bucket
    user_data["aws"]["tile_bucket"] = names.tile_bucket
    user_data["aws"]["ingest_bucket"] = names.ingest_bucket
    user_data["aws"]["s3-index-table"] = names.s3_index
    user_data["aws"]["tile-index-table"] = names.tile_index
    user_data["aws"]["id-index-table"] = names.id_index
    user_data["aws"]["id-count-table"] = names.id_count_index

    config.add_autoscale_group("Activities",
                               names.activities,
                               aws.ami_lookup(session, 'activities.boss'),
                               keypair,
                               subnets=internal_subnets_lambda,
                               type_=const.ACTIVITIES_TYPE,
                               security_groups=[sgs[names.internal]],
                               user_data=str(user_data),
                               role=aws.instance_profile_arn_lookup(
                                   session, "activities"),
                               min=1,
                               max=1)

    config.add_lambda("IngestLambda",
                      names.ingest_lambda,
                      aws.role_arn_lookup(session, 'IngestQueueUpload'),
                      const.INGEST_LAMBDA,
                      handler="index.handler",
                      timeout=60 * 5)

    config.add_lambda_permission("IngestLambdaExecute", Ref("IngestLambda"))

    return config
def create_config(bosslet_config, lookup=True):
    """Create the CloudFormationConfiguration object for the activities stack.

    Builds, in order:
      * a (currently DISABLED) scheduled CloudWatch event rule
        ("DeleteEventRule") targeting the multi lambda with delete-related
        resource names as its JSON payload,
      * the "Activities" autoscale group (min=1, max=1), configured through
        UserData,
      * "IngestLambda" plus its invoke permission,
      * the downsample / resolution-hierarchy lambdas ("DownsampleVolumeLambda",
        "startSfnLambda") and the "DownsampleDLQ" SNS topic + handler lambda.

    Args:
        bosslet_config: Bosslet configuration providing names, session,
            SSH_KEY and ALERT_TOPIC.
        lookup (bool): When True (default), resolve the multi lambda's ARN via
            AWS; when False, skip the lookup and leave the target ARN as None
            (useful when the lambda does not exist yet).

    Returns:
        The populated CloudFormationConfiguration.

    Raises:
        MissingResourceError: If the ALERT_TOPIC SNS topic cannot be found.
    """
    config = CloudFormationConfiguration('activities', bosslet_config)
    names = bosslet_config.names
    keypair = bosslet_config.SSH_KEY
    session = bosslet_config.session

    vpc_id = config.find_vpc()
    sgs = aws.sg_lookup_all(session, vpc_id)

    # Only the ASG-compatible subnets are consumed below.  The original code
    # also bound the unrestricted lookup to an unused local
    # (internal_subnets); the call is kept in case the lookup caches state,
    # but its result is discarded.
    config.find_all_subnets()
    internal_subnets_asg, _ = config.find_all_subnets(compatibility='asg')

    topic_arn = aws.sns_topic_lookup(session, bosslet_config.ALERT_TOPIC)
    if topic_arn is None:
        raise MissingResourceError('SNS topic', bosslet_config.ALERT_TOPIC)

    # Payload handed to the delete lambda on every scheduled firing.
    event_data = {
        "lambda-name": "delete_lambda",
        "db": names.endpoint_db.rds,
        "meta-db": names.meta.ddb,
        "s3-index-table": names.s3_index.ddb,
        "id-index-table": names.id_index.ddb,
        "id-count-table": names.id_count_index.ddb,
        "cuboid_bucket": names.cuboid_bucket.s3,
        "delete_bucket": names.delete_bucket.s3,
        "topic-arn": topic_arn,
        "query-deletes-sfn-name": names.query_deletes.sfn,
        "delete-sfn-name": names.delete_cuboid.sfn,
        "delete-exp-sfn-name": names.delete_experiment.sfn,
        "delete-coord-frame-sfn-name": names.delete_coord_frame.sfn,
        "delete-coll-sfn-name": names.delete_collection.sfn,
    }

    role_arn = aws.role_arn_lookup(session, "events_for_delete_lambda")
    multi_lambda = names.multi_lambda.lambda_
    if lookup:
        lambda_arn = aws.lambda_arn_lookup(session, multi_lambda)
    else:
        lambda_arn = None
    target_list = [{
        "Arn": lambda_arn,
        "Id": multi_lambda,
        "Input": json.dumps(event_data)
    }]

    # Fire hourly 06:00-11:00 UTC, Tuesday through Friday.
    schedule_expression = "cron(1 6-11/1 ? * TUE-FRI *)"
    #schedule_expression = "cron(0/2 * * * ? *)"  # testing fire every two minutes

    config.add_event_rule("DeleteEventRule",
                          # XXX What type for event rules?
                          names.delete_event_rule.dns,
                          role_arn=role_arn,
                          schedule_expression=schedule_expression,
                          target_list=target_list,
                          state='DISABLED')  # Disabled until new delete is finished.

    # Events have to be given permission to run lambda.
    config.add_lambda_permission('DeleteRulePerm',
                                 multi_lambda,
                                 principal='events.amazonaws.com',
                                 source=Arn('DeleteEventRule'))

    # UserData consumed by the activities instance at boot.
    user_data = UserData()
    user_data["system"]["fqdn"] = names.activities.dns
    user_data["system"]["type"] = "activities"
    user_data["aws"]["db"] = names.endpoint_db.rds
    user_data["aws"]["cache"] = names.cache.redis
    user_data["aws"]["cache-state"] = names.cache_state.redis
    user_data["aws"]["cache-db"] = "0"
    user_data["aws"]["cache-state-db"] = "0"
    user_data["aws"]["meta-db"] = names.meta.ddb
    user_data["aws"]["cuboid_bucket"] = names.cuboid_bucket.s3
    user_data["aws"]["tile_bucket"] = names.tile_bucket.s3
    user_data["aws"]["ingest_bucket"] = names.ingest_bucket.s3
    user_data["aws"]["s3-index-table"] = names.s3_index.ddb
    user_data["aws"]["tile-index-table"] = names.tile_index.ddb
    user_data["aws"]["id-index-table"] = names.id_index.ddb
    user_data["aws"]["id-count-table"] = names.id_count_index.ddb
    user_data["aws"]["max_task_id_suffix"] = str(const.MAX_TASK_ID_SUFFIX)
    user_data["aws"]["tile_ingest_lambda"] = names.tile_ingest.lambda_
    user_data["aws"]["tile_uploaded_lambda"] = names.tile_uploaded.lambda_

    config.add_autoscale_group("Activities",
                               names.activities.dns,
                               aws.ami_lookup(bosslet_config, names.activities.ami),
                               keypair,
                               subnets=internal_subnets_asg,
                               type_=const.ACTIVITIES_TYPE,
                               security_groups=[sgs[names.internal.sg]],
                               user_data=str(user_data),
                               role=aws.instance_profile_arn_lookup(
                                   session, "activities"),
                               min=1,
                               max=1)

    config.add_lambda("IngestLambda",
                      names.ingest_lambda.lambda_,
                      aws.role_arn_lookup(session, 'IngestQueueUpload'),
                      const.INGEST_LAMBDA,
                      handler="index.handler",
                      timeout=60 * 5,
                      runtime='python3.6',
                      memory=3008)

    config.add_lambda_permission("IngestLambdaExecute", Ref("IngestLambda"))

    # Downsample / Resolution Hierarchy support
    lambda_role = aws.role_arn_lookup(session, "lambda_resolution_hierarchy")

    config.add_lambda("DownsampleVolumeLambda",
                      names.downsample_volume.lambda_,
                      lambda_role,
                      handler="downsample_volume.handler",
                      timeout=120,
                      memory=1024,
                      dlq=Ref('DownsampleDLQ'))

    start_sfn_lambda_role = aws.role_arn_lookup(session, 'StartStepFcnLambdaRole')
    config.add_lambda("startSfnLambda",
                      names.start_sfn.lambda_,
                      start_sfn_lambda_role,
                      handler="start_sfn_lambda.handler",
                      timeout=60,
                      memory=128)

    # This dead letter queue behavior uses a lambda to put failed lambda
    # executions into a dlqs created specifically for each downsample job.
    # There is a separate dlq for each resolution.
    config.add_sns_topic("DownsampleDLQ",
                         names.downsample_dlq.sns,
                         names.downsample_dlq.sns,
                         [('lambda', Arn('DownsampleDLQLambda'))])

    config.add_lambda('DownsampleDLQLambda',
                      names.downsample_dlq.lambda_,
                      lambda_role,
                      const.DOWNSAMPLE_DLQ_LAMBDA,
                      handler='index.handler',
                      runtime='python3.7',
                      timeout=10)

    config.add_lambda_permission('DownsampleDLQLambdaExecute',
                                 Ref('DownsampleDLQLambda'))

    return config
def create_config(session, domain):
    """Create the CloudFormationConfiguration object for the activities stack.

    Builds, in order:
      * a scheduled CloudWatch event rule ("DeleteEventRule") that fires the
        multi lambda with delete-related resource names as its JSON payload,
      * the "Activities" autoscale group (min=1, max=1), configured through
        UserData,
      * "IngestLambda" plus its invoke permission,
      * the downsample "DownsampleVolumeLambda" (code pulled from S3) and the
        "DownsampleDLQ" SNS topic + handler lambda.

    Args:
        session: boto3-style session used for all AWS lookups.
        domain: VPC domain name; used to derive resource names and the
            multilambda S3 zip key.

    Returns:
        The populated CloudFormationConfiguration.
    """
    config = CloudFormationConfiguration('activities', domain, const.REGION)
    names = AWSNames(domain)

    # NOTE(review): keypair is deliberately written to a module-level global;
    # other functions in this file presumably read it -- confirm before
    # refactoring the global away.
    global keypair
    keypair = aws.keypair_lookup(session)

    vpc_id = config.find_vpc(session)
    sgs = aws.sg_lookup_all(session, vpc_id)

    # Only the lambda-compatible subnets are consumed below.  The original
    # code also bound the unrestricted lookup to an unused local
    # (internal_subnets); the call is kept in case the lookup caches state,
    # but its result is discarded.
    config.find_all_availability_zones(session)
    internal_subnets_lambda, _ = config.find_all_availability_zones(
        session, lambda_compatible_only=True)

    topic_arn = aws.sns_topic_lookup(session, "ProductionMicronsMailingList")

    # Payload handed to the delete lambda on every scheduled firing.
    event_data = {
        "lambda-name": "delete_lambda",
        "db": names.endpoint_db,
        "meta-db": names.meta,
        "s3-index-table": names.s3_index,
        "id-index-table": names.id_index,
        "id-count-table": names.id_count_index,
        "cuboid_bucket": names.cuboid_bucket,
        "delete_bucket": names.delete_bucket,
        "topic-arn": topic_arn,
        "query-deletes-sfn-name": names.query_deletes,
        "delete-sfn-name": names.delete_cuboid,
        "delete-exp-sfn-name": names.delete_experiment,
        "delete-coord-frame-sfn-name": names.delete_coord_frame,
        "delete-coll-sfn-name": names.delete_collection,
    }

    role_arn = aws.role_arn_lookup(session, "events_for_delete_lambda")
    multi_lambda = names.multi_lambda
    lambda_arn = aws.lambda_arn_lookup(session, multi_lambda)
    target_list = [{
        "Arn": lambda_arn,
        "Id": multi_lambda,
        "Input": json.dumps(event_data)
    }]

    # Fire hourly 06:00-11:00 UTC, Tuesday through Friday.
    schedule_expression = "cron(1 6-11/1 ? * TUE-FRI *)"
    #schedule_expression = "cron(0/2 * * * ? *)"  # testing fire every two minutes

    config.add_event_rule("DeleteEventRule",
                          names.delete_event_rule,
                          role_arn=role_arn,
                          schedule_expression=schedule_expression,
                          target_list=target_list,
                          description=None)

    # Events have to be given permission to run lambda.
    config.add_lambda_permission('DeleteRulePerm',
                                 multi_lambda,
                                 principal='events.amazonaws.com',
                                 source=Arn('DeleteEventRule'))

    # UserData consumed by the activities instance at boot.
    user_data = UserData()
    user_data["system"]["fqdn"] = names.activities
    user_data["system"]["type"] = "activities"
    user_data["aws"]["db"] = names.endpoint_db
    user_data["aws"]["cache"] = names.cache
    user_data["aws"]["cache-state"] = names.cache_state
    user_data["aws"]["cache-db"] = "0"
    user_data["aws"]["cache-state-db"] = "0"
    user_data["aws"]["meta-db"] = names.meta
    user_data["aws"]["cuboid_bucket"] = names.cuboid_bucket
    user_data["aws"]["tile_bucket"] = names.tile_bucket
    user_data["aws"]["ingest_bucket"] = names.ingest_bucket
    user_data["aws"]["s3-index-table"] = names.s3_index
    user_data["aws"]["tile-index-table"] = names.tile_index
    user_data["aws"]["id-index-table"] = names.id_index
    user_data["aws"]["id-count-table"] = names.id_count_index
    user_data["aws"]["max_task_id_suffix"] = str(const.MAX_TASK_ID_SUFFIX)

    config.add_autoscale_group("Activities",
                               names.activities,
                               aws.ami_lookup(session, 'activities.boss'),
                               keypair,
                               subnets=internal_subnets_lambda,
                               type_=const.ACTIVITIES_TYPE,
                               security_groups=[sgs[names.internal]],
                               user_data=str(user_data),
                               role=aws.instance_profile_arn_lookup(
                                   session, "activities"),
                               min=1,
                               max=1)

    config.add_lambda("IngestLambda",
                      names.ingest_lambda,
                      aws.role_arn_lookup(session, 'IngestQueueUpload'),
                      const.INGEST_LAMBDA,
                      handler="index.handler",
                      timeout=60 * 5,
                      memory=3008)

    config.add_lambda_permission("IngestLambdaExecute", Ref("IngestLambda"))

    # Downsample / Resolution Hierarchy support
    lambda_role = aws.role_arn_lookup(session, "lambda_resolution_hierarchy")

    # Code for this lambda is loaded from the multilambda zip in S3 rather
    # than from a local constant.
    config.add_lambda("DownsampleVolumeLambda",
                      names.downsample_volume_lambda,
                      lambda_role,
                      s3=(aws.get_lambda_s3_bucket(session),
                          "multilambda.{}.zip".format(domain),
                          "downsample_volume.handler"),
                      timeout=120,
                      memory=1024,
                      runtime='python3.6',
                      dlq=Ref('DownsampleDLQ'))

    config.add_sns_topic("DownsampleDLQ",
                         names.downsample_dlq,
                         names.downsample_dlq,
                         [('lambda', Arn('DownsampleDLQLambda'))])

    config.add_lambda('DownsampleDLQLambda',
                      names.downsample_dlq,
                      lambda_role,
                      const.DOWNSAMPLE_DLQ_LAMBDA,
                      handler='index.handler',
                      timeout=10)

    config.add_lambda_permission('DownsampleDLQLambdaExecute',
                                 Ref('DownsampleDLQLambda'))

    return config