Example #1
def create_config(bosslet_config, user_data=None):
    """
    Create the CloudFormationConfiguration object.
    Args:
        bosslet_config (BossConfiguration): target bosslet
        user_data (UserData): information used by the endpoint instance and vault.
            Data will be run through the CloudFormation Fn::Join template intrinsic
            function, so other template intrinsic functions used in the user_data
            will be parsed and executed.

    Returns: the config for the CloudFormation stack

    """

    # Prepare user data for parsing by CloudFormation.
    if user_data is not None:
        parsed_user_data = { "Fn::Join" : ["", user_data.format_for_cloudformation()]}
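        # The resulting value is the dict form of the Fn::Join intrinsic,
        # e.g. {"Fn::Join": ["", [...strings and embedded intrinsics...]]},
        # which CloudFormation evaluates when the stack is launched.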
    else:
        parsed_user_data = user_data

    keypair = bosslet_config.SSH_KEY
    session = bosslet_config.session
    names = bosslet_config.names
    config = CloudFormationConfiguration("cachedb", bosslet_config)

    vpc_id = config.find_vpc()

    #####
    # TODO: When CF config files are refactored for multi-account support
    #       the creation of _all_ subnets should be moved into core.
    #       AWS doesn't charge for the VPC or subnets, so it doesn't
    #       increase cost and cleans up subnet creation

    # Create several subnets for all the lambdas to use.
    internal_route_table_id = aws.rt_lookup(session, vpc_id, names.internal.rt)

    lambda_subnets = config.add_all_lambda_subnets()
    for lambda_subnet in lambda_subnets:
        key = lambda_subnet['Ref']
        config.add_route_table_association(key + "RTA",
                                           internal_route_table_id,
                                           lambda_subnet)

    # Create a custom resource to help delete ENIs from lambdas
    # DP NOTE: After deleting a lambda the ENIs may stick around for a while, causing the stack delete to fail
    #          See https://stackoverflow.com/a/41310289
    config.add_arg(Arg.String('StackName', config.stack_name))
    config.add_arg(Arg.String('DeleteENILambda', aws.lambda_arn_lookup(session, names.delete_eni.lambda_)))
    config.add_custom_resource('DeleteENI', 'DeleteENI', Ref('DeleteENILambda'), StackName = Ref('StackName'))

    # Look up the Internal Subnet and Internal Security Group IDs that are
    # needed by other resources
    internal_subnet_id = aws.subnet_id_lookup(session, names.internal.subnet)
    config.add_arg(Arg.Subnet("InternalSubnet",
                              internal_subnet_id,
                              "ID of Internal Subnet to create resources in"))

    internal_sg_id = aws.sg_lookup(session, vpc_id, names.internal.sg)
    config.add_arg(Arg.SecurityGroup("InternalSecurityGroup",
                                     internal_sg_id,
                                     "ID of internal Security Group"))

    role = aws.role_arn_lookup(session, "lambda_cache_execution")
    config.add_arg(Arg.String("LambdaCacheExecutionRole", role,
                              "IAM role for " + names.multi_lambda.lambda_))

    cuboid_import_role = aws.role_arn_lookup(session, CUBOID_IMPORT_ROLE)
    config.add_arg(Arg.String(CUBOID_IMPORT_ROLE, cuboid_import_role,
                              "IAM role for cuboidImport"))

    config.add_capabilities(['CAPABILITY_IAM'])
 
    cuboid_bucket_name = names.cuboid_bucket.s3
    if not aws.s3_bucket_exists(session, cuboid_bucket_name):
        config.add_s3_bucket("cuboidBucket", cuboid_bucket_name)

    config.add_s3_bucket_policy(
        "cuboidBucketPolicy", cuboid_bucket_name,
        ['s3:GetObject', 's3:PutObject'],
        { 'AWS': role})
    config.append_s3_bucket_policy(
        "cuboidBucketPolicy", cuboid_bucket_name,
        ['s3:PutObject'], { 'AWS': cuboid_import_role})

    delete_bucket_name = names.delete_bucket.s3
    if not aws.s3_bucket_exists(session, delete_bucket_name):
        config.add_s3_bucket("deleteBucket", delete_bucket_name)
    config.add_s3_bucket_policy(
        "deleteBucketPolicy", delete_bucket_name,
        ['s3:GetObject', 's3:PutObject'],
        { 'AWS': role})

    tile_bucket_name = names.tile_bucket.s3
    if not aws.s3_bucket_exists(session, tile_bucket_name):
        life_cycle_cfg = get_cf_bucket_life_cycle_rules()
        config.add_s3_bucket(
            "tileBucket", tile_bucket_name, life_cycle_config=life_cycle_cfg)

    config.add_s3_bucket_policy(
        "tileBucketPolicy", tile_bucket_name,
        ['s3:GetObject', 's3:PutObject'],
        { 'AWS': role})

    # The ingest bucket is a staging area for cuboids uploaded during volumetric ingest.
    creating_ingest_bucket = False
    ingest_bucket_name = names.ingest_bucket.s3
    if not aws.s3_bucket_exists(session, ingest_bucket_name):
        creating_ingest_bucket = True
        ing_bucket_life_cycle_cfg = get_cf_bucket_life_cycle_rules()
        config.add_s3_bucket("ingestBucket", ingest_bucket_name,
            life_cycle_config=ing_bucket_life_cycle_cfg)

    config.add_s3_bucket_policy(
        "ingestBucketPolicy", ingest_bucket_name,
        ['s3:GetObject', 's3:PutObject', 's3:PutObjectTagging'],
        { 'AWS': cuboid_import_role})

    config.add_ec2_instance("CacheManager",
                            names.cachemanager.dns,
                            aws.ami_lookup(bosslet_config, names.cachemanager.ami),
                            keypair,
                            subnet=Ref("InternalSubnet"),
                            public_ip=False,
                            type_=const.CACHE_MANAGER_TYPE,
                            security_groups=[Ref("InternalSecurityGroup")],
                            user_data=parsed_user_data,
                            role="cachemanager")

    config.add_sqs_queue(
        names.ingest_cleanup_dlq.sqs, names.ingest_cleanup_dlq.sqs, 30, 20160)
    config.add_sqs_queue(
        names.cuboid_import_dlq.sqs, names.cuboid_import_dlq.sqs, 30, 20160)

    config.add_sqs_policy('cuboidImportDlqPolicy', 'cuboidImportDlqPolicy',
        [Ref(names.cuboid_import_dlq.sqs)], cuboid_import_role)

    config.add_lambda("MultiLambda",
                      names.multi_lambda.lambda_,
                      Ref("LambdaCacheExecutionRole"),
                      handler='lambda_loader.handler',
                      timeout=120,
                      memory=1536,
                      security_groups=[Ref('InternalSecurityGroup')],
                      subnets=lambda_subnets)
    config.add_lambda("TileUploadedLambda",
                      names.tile_uploaded.lambda_,
                      Ref("LambdaCacheExecutionRole"),
                      handler='tile_uploaded_lambda.handler',
                      timeout=5,
                      memory=1024)
    config.add_lambda("TileIngestLambda",
                      names.tile_ingest.lambda_,
                      Ref("LambdaCacheExecutionRole"),
                      handler='tile_ingest_lambda.handler',
                      timeout=30,
                      memory=1536)
    config.add_lambda("DeleteTileObjsLambda",
                      names.delete_tile_objs.lambda_,
                      Ref("LambdaCacheExecutionRole"),
                      handler='delete_tile_objs_lambda.handler',
                      timeout=90,
                      memory=128,
                      dlq=Arn(names.ingest_cleanup_dlq.sqs))
    config.add_lambda("DeleteTileEntryLambda",
                      names.delete_tile_index_entry.lambda_,
                      Ref("LambdaCacheExecutionRole"),
                      handler='delete_tile_index_entry_lambda.handler',
                      timeout=90,
                      memory=128,
                      dlq=Arn(names.ingest_cleanup_dlq.sqs))
    config.add_lambda("CuboidImportLambda",
                      names.cuboid_import_lambda.lambda_,
                      Ref(CUBOID_IMPORT_ROLE),
                      handler='cuboid_import_lambda.handler',
                      timeout=90,
                      memory=128,
                      dlq=Arn(names.cuboid_import_dlq.sqs))
    config.add_lambda("VolumetricIngestLambda",
                      names.volumetric_ingest_queue_upload_lambda.lambda_,
                      Ref("LambdaCacheExecutionRole"),
                      handler='ingest_queue_upload_volumetric_lambda.handler',
                      timeout=120,
                      memory=1024)

    if creating_ingest_bucket:
        config.add_lambda_permission(
            'ingestBucketInvokeCuboidImportLambda', names.cuboid_import_lambda.lambda_,
            principal='s3.amazonaws.com', source={
                'Fn::Join': [':', ['arn', 'aws', 's3', '', '', ingest_bucket_name]]}, #DP TODO: move into constants
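                # Joined on ':' this produces the bucket ARN "arn:aws:s3:::<ingest bucket>".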
            depends_on=['ingestBucket', 'CuboidImportLambda']
        )
    else:
        # NOTE: this permission doesn't seem to apply properly when doing a
        # CloudFormation update.  During testing, I had to manually apply this
        # permission before the bucket trigger could be applied in post_init().
        # Doing a CloudFormation delete followed by a create did not have a
        # problem.
        config.add_lambda_permission(
            'ingestBucketInvokeCuboidImportLambda', names.cuboid_import_lambda.lambda_,
            principal='s3.amazonaws.com', source={
                'Fn::Join': [':', ['arn', 'aws', 's3', '', '', ingest_bucket_name]]},
            depends_on='CuboidImportLambda'
        )

    # Add topic to indicate that the object store has been write locked.
    # Now using "production mailing list" instead of a separate write lock topic.
    #config.add_sns_topic('WriteLock',
    #                     names.write_lock_topic,
    #                     names.write_lock,
    #                     []) # TODO: add subscribers

    return config
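
A minimal driver for the example above might look like the following sketch; the create() call and the way bosslet_config and user_data are obtained are assumptions for illustration, not part of the listing:

def build_cachedb_stack(bosslet_config, user_data=None):
    # Build the CloudFormationConfiguration that describes the cachedb stack.
    config = create_config(bosslet_config, user_data)
    # Launching the template depends on the CloudFormationConfiguration API;
    # a create()-style method is assumed here purely for illustration.
    config.create()
    return config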
Example #2
def create_config(session, domain):
    """Create the CloudFormationConfiguration object."""
    config = CloudFormationConfiguration('activities', domain, const.REGION)
    names = AWSNames(domain)

    global keypair
    keypair = aws.keypair_lookup(session)

    vpc_id = config.find_vpc(session)
    sgs = aws.sg_lookup_all(session, vpc_id)
    internal_subnets, _ = config.find_all_availability_zones(session)
    internal_subnets_lambda, _ = config.find_all_availability_zones(
        session, lambda_compatible_only=True)
    topic_arn = aws.sns_topic_lookup(session, "ProductionMicronsMailingList")
    event_data = {
        "lambda-name": "delete_lambda",
        "db": names.endpoint_db,
        "meta-db": names.meta,
        "s3-index-table": names.s3_index,
        "id-index-table": names.id_index,
        "id-count-table": names.id_count_index,
        "cuboid_bucket": names.cuboid_bucket,
        "delete_bucket": names.delete_bucket,
        "topic-arn": topic_arn,
        "query-deletes-sfn-name": names.query_deletes,
        "delete-sfn-name": names.delete_cuboid,
        "delete-exp-sfn-name": names.delete_experiment,
        "delete-coord-frame-sfn-name": names.delete_coord_frame,
        "delete-coll-sfn-name": names.delete_collection
    }

    role_arn = aws.role_arn_lookup(session, "events_for_delete_lambda")
    multi_lambda = names.multi_lambda
    lambda_arn = aws.lambda_arn_lookup(session, multi_lambda)
    target_list = [{
        "Arn": lambda_arn,
        "Id": multi_lambda,
        "Input": json.dumps(event_data)
    }]
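    # AWS cron format: minute hour day-of-month month day-of-week year.
    # Fires at minute 1 of each hour from 06:00-11:00 UTC, Tuesday through Friday.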
    schedule_expression = "cron(1 6-11/1 ? * TUE-FRI *)"
    #schedule_expression = "cron(0/2 * * * ? *)"  # testing fire every two minutes

    config.add_event_rule("DeleteEventRule",
                          names.delete_event_rule,
                          role_arn=role_arn,
                          schedule_expression=schedule_expression,
                          target_list=target_list,
                          description=None)
    # Events have to be given permission to run lambda.
    config.add_lambda_permission('DeleteRulePerm',
                                 multi_lambda,
                                 principal='events.amazonaws.com',
                                 source=Arn('DeleteEventRule'))
    user_data = UserData()
    user_data["system"]["fqdn"] = names.activities
    user_data["system"]["type"] = "activities"
    user_data["aws"]["db"] = names.endpoint_db
    user_data["aws"]["cache"] = names.cache
    user_data["aws"]["cache-state"] = names.cache_state
    user_data["aws"]["cache-db"] = "0"
    user_data["aws"]["cache-state-db"] = "0"
    user_data["aws"]["meta-db"] = names.meta
    user_data["aws"]["cuboid_bucket"] = names.cuboid_bucket
    user_data["aws"]["tile_bucket"] = names.tile_bucket
    user_data["aws"]["ingest_bucket"] = names.ingest_bucket
    user_data["aws"]["s3-index-table"] = names.s3_index
    user_data["aws"]["tile-index-table"] = names.tile_index
    user_data["aws"]["id-index-table"] = names.id_index
    user_data["aws"]["id-count-table"] = names.id_count_index

    config.add_autoscale_group("Activities",
                               names.activities,
                               aws.ami_lookup(session, 'activities.boss'),
                               keypair,
                               subnets=internal_subnets_lambda,
                               type_=const.ACTIVITIES_TYPE,
                               security_groups=[sgs[names.internal]],
                               user_data=str(user_data),
                               role=aws.instance_profile_arn_lookup(
                                   session, "activities"),
                               min=1,
                               max=1)

    config.add_lambda("IngestLambda",
                      names.ingest_lambda,
                      aws.role_arn_lookup(session, 'IngestQueueUpload'),
                      const.INGEST_LAMBDA,
                      handler="index.handler",
                      timeout=60 * 5)

    config.add_lambda_permission("IngestLambdaExecute", Ref("IngestLambda"))

    return config
Example #3
def create_config(bosslet_config, lookup=True):
    """Create the CloudFormationConfiguration object."""
    config = CloudFormationConfiguration('activities', bosslet_config)
    names = bosslet_config.names
    keypair = bosslet_config.SSH_KEY
    session = bosslet_config.session

    vpc_id = config.find_vpc()
    sgs = aws.sg_lookup_all(session, vpc_id)
    internal_subnets, _ = config.find_all_subnets()
    internal_subnets_asg, _ = config.find_all_subnets(compatibility='asg')

    topic_arn = aws.sns_topic_lookup(session, bosslet_config.ALERT_TOPIC)
    if topic_arn is None:
        raise MissingResourceError('SNS topic', bosslet_config.ALERT_TOPIC)

    event_data = {
        "lambda-name": "delete_lambda",
        "db": names.endpoint_db.rds,
        "meta-db": names.meta.ddb,
        "s3-index-table": names.s3_index.ddb,
        "id-index-table": names.id_index.ddb,
        "id-count-table": names.id_count_index.ddb,
        "cuboid_bucket": names.cuboid_bucket.s3,
        "delete_bucket": names.delete_bucket.s3,
        "topic-arn": topic_arn,
        "query-deletes-sfn-name": names.query_deletes.sfn,
        "delete-sfn-name": names.delete_cuboid.sfn,
        "delete-exp-sfn-name": names.delete_experiment.sfn,
        "delete-coord-frame-sfn-name": names.delete_coord_frame.sfn,
        "delete-coll-sfn-name": names.delete_collection.sfn,
    }

    role_arn = aws.role_arn_lookup(session, "events_for_delete_lambda")
    multi_lambda = names.multi_lambda.lambda_
    if lookup:
        lambda_arn = aws.lambda_arn_lookup(session, multi_lambda)
    else:
        lambda_arn = None
    target_list = [{
        "Arn": lambda_arn,
        "Id": multi_lambda,
        "Input": json.dumps(event_data)
    }]
    schedule_expression = "cron(1 6-11/1 ? * TUE-FRI *)"
    #schedule_expression = "cron(0/2 * * * ? *)"  # testing fire every two minutes

    config.add_event_rule("DeleteEventRule",
                          # XXX What type for event rules?
                          names.delete_event_rule.dns,
                          role_arn=role_arn,
                          schedule_expression=schedule_expression,
                          target_list=target_list,
                          state='DISABLED')   # Disabled until new delete is finished.

    # Events have to be given permission to run lambda.
    config.add_lambda_permission('DeleteRulePerm',
                                 multi_lambda,
                                 principal='events.amazonaws.com',
                                 source=Arn('DeleteEventRule'))
    user_data = UserData()
    user_data["system"]["fqdn"] = names.activities.dns
    user_data["system"]["type"] = "activities"
    user_data["aws"]["db"] = names.endpoint_db.rds
    user_data["aws"]["cache"] = names.cache.redis
    user_data["aws"]["cache-state"] = names.cache_state.redis
    user_data["aws"]["cache-db"] = "0"
    user_data["aws"]["cache-state-db"] = "0"
    user_data["aws"]["meta-db"] = names.meta.ddb
    user_data["aws"]["cuboid_bucket"] = names.cuboid_bucket.s3
    user_data["aws"]["tile_bucket"] = names.tile_bucket.s3
    user_data["aws"]["ingest_bucket"] = names.ingest_bucket.s3
    user_data["aws"]["s3-index-table"] = names.s3_index.ddb
    user_data["aws"]["tile-index-table"] = names.tile_index.ddb
    user_data["aws"]["id-index-table"] = names.id_index.ddb
    user_data["aws"]["id-count-table"] = names.id_count_index.ddb
    user_data["aws"]["max_task_id_suffix"] = str(const.MAX_TASK_ID_SUFFIX)
    user_data["aws"]["tile_ingest_lambda"] = names.tile_ingest.lambda_
    user_data["aws"]["tile_uploaded_lambda"] = names.tile_uploaded.lambda_

    config.add_autoscale_group("Activities",
                               names.activities.dns,
                               aws.ami_lookup(bosslet_config, names.activities.ami),
                               keypair,
                               subnets=internal_subnets_asg,
                               type_=const.ACTIVITIES_TYPE,
                               security_groups=[sgs[names.internal.sg]],
                               user_data=str(user_data),
                               role=aws.instance_profile_arn_lookup(session, "activities"),
                               min=1,
                               max=1)

    config.add_lambda("IngestLambda",
                      names.ingest_lambda.lambda_,
                      aws.role_arn_lookup(session, 'IngestQueueUpload'),
                      const.INGEST_LAMBDA,
                      handler="index.handler",
                      timeout=60 * 5,
                      runtime='python3.6',
                      memory=3008)

    config.add_lambda_permission("IngestLambdaExecute", Ref("IngestLambda"))


    # Downsample / Resolution Hierarchy support
    lambda_role = aws.role_arn_lookup(session, "lambda_resolution_hierarchy")

    config.add_lambda("DownsampleVolumeLambda",
                      names.downsample_volume.lambda_,
                      lambda_role,
                      handler="downsample_volume.handler",
                      timeout=120,
                      memory=1024,
                      dlq = Ref('DownsampleDLQ'))

    start_sfn_lambda_role = aws.role_arn_lookup(session, 'StartStepFcnLambdaRole')
    config.add_lambda("startSfnLambda",
               names.start_sfn.lambda_,
               start_sfn_lambda_role,
               handler="start_sfn_lambda.handler",
               timeout=60,
               memory=128)

    # This dead letter queue behavior uses a lambda to put failed lambda
    # executions into DLQs created specifically for each downsample job.
    # There is a separate DLQ for each resolution.
    config.add_sns_topic("DownsampleDLQ",
                         names.downsample_dlq.sns,
                         names.downsample_dlq.sns,
                         [('lambda', Arn('DownsampleDLQLambda'))])

    config.add_lambda('DownsampleDLQLambda',
                      names.downsample_dlq.lambda_,
                      lambda_role,
                      const.DOWNSAMPLE_DLQ_LAMBDA,
                      handler='index.handler',
                      runtime='python3.7',
                      timeout=10)

    config.add_lambda_permission('DownsampleDLQLambdaExecute',
                                 Ref('DownsampleDLQLambda'))

    return config
Example #4
def create_config(session, domain):
    """Create the CloudFormationConfiguration object."""
    config = CloudFormationConfiguration('activities', domain, const.REGION)
    names = AWSNames(domain)

    global keypair
    keypair = aws.keypair_lookup(session)

    vpc_id = config.find_vpc(session)
    sgs = aws.sg_lookup_all(session, vpc_id)
    internal_subnets, _ = config.find_all_availability_zones(session)
    internal_subnets_lambda, _ = config.find_all_availability_zones(session, lambda_compatible_only=True)
    topic_arn = aws.sns_topic_lookup(session, "ProductionMicronsMailingList")
    event_data = {
        "lambda-name": "delete_lambda",
        "db": names.endpoint_db,
        "meta-db": names.meta,
        "s3-index-table": names.s3_index,
        "id-index-table": names.id_index,
        "id-count-table": names.id_count_index,
        "cuboid_bucket": names.cuboid_bucket,
        "delete_bucket": names.delete_bucket,
        "topic-arn": topic_arn,
        "query-deletes-sfn-name": names.query_deletes,
        "delete-sfn-name": names.delete_cuboid,
        "delete-exp-sfn-name": names.delete_experiment,
        "delete-coord-frame-sfn-name": names.delete_coord_frame,
        "delete-coll-sfn-name": names.delete_collection
    }

    role_arn = aws.role_arn_lookup(session, "events_for_delete_lambda")
    multi_lambda = names.multi_lambda
    lambda_arn = aws.lambda_arn_lookup(session, multi_lambda)
    target_list = [{
        "Arn": lambda_arn,
        "Id": multi_lambda,
        "Input": json.dumps(event_data)
    }]
    schedule_expression = "cron(1 6-11/1 ? * TUE-FRI *)"
    #schedule_expression = "cron(0/2 * * * ? *)"  # testing fire every two minutes

    config.add_event_rule("DeleteEventRule", names.delete_event_rule, role_arn=role_arn,
                          schedule_expression=schedule_expression, target_list=target_list, description=None)
    # Events have to be given permission to run lambda.
    config.add_lambda_permission('DeleteRulePerm', multi_lambda, principal='events.amazonaws.com',
                                 source=Arn('DeleteEventRule'))
    user_data = UserData()
    user_data["system"]["fqdn"] = names.activities
    user_data["system"]["type"] = "activities"
    user_data["aws"]["db"] = names.endpoint_db
    user_data["aws"]["cache"] = names.cache
    user_data["aws"]["cache-state"] = names.cache_state
    user_data["aws"]["cache-db"] = "0"
    user_data["aws"]["cache-state-db"] = "0"
    user_data["aws"]["meta-db"] = names.meta
    user_data["aws"]["cuboid_bucket"] = names.cuboid_bucket
    user_data["aws"]["tile_bucket"] = names.tile_bucket
    user_data["aws"]["ingest_bucket"] = names.ingest_bucket
    user_data["aws"]["s3-index-table"] = names.s3_index
    user_data["aws"]["tile-index-table"] = names.tile_index
    user_data["aws"]["id-index-table"] = names.id_index
    user_data["aws"]["id-count-table"] = names.id_count_index
    user_data["aws"]["max_task_id_suffix"] = str(const.MAX_TASK_ID_SUFFIX)

    config.add_autoscale_group("Activities",
                               names.activities,
                               aws.ami_lookup(session, 'activities.boss'),
                               keypair,
                               subnets=internal_subnets_lambda,
                               type_=const.ACTIVITIES_TYPE,
                               security_groups=[sgs[names.internal]],
                               user_data=str(user_data),
                               role=aws.instance_profile_arn_lookup(session, "activities"),
                               min=1,
                               max=1)

    config.add_lambda("IngestLambda",
                      names.ingest_lambda,
                      aws.role_arn_lookup(session, 'IngestQueueUpload'),
                      const.INGEST_LAMBDA,
                      handler="index.handler",
                      timeout=60 * 5,
                      memory=3008)

    config.add_lambda_permission("IngestLambdaExecute", Ref("IngestLambda"))


    # Downsample / Resolution Hierarchy support
    lambda_role = aws.role_arn_lookup(session, "lambda_resolution_hierarchy")

    config.add_lambda("DownsampleVolumeLambda",
                      names.downsample_volume_lambda,
                      lambda_role,
                      s3=(aws.get_lambda_s3_bucket(session),
                          "multilambda.{}.zip".format(domain),
                          "downsample_volume.handler"),
                      timeout=120,
                      memory=1024,
                      runtime='python3.6',
                      dlq = Ref('DownsampleDLQ'))

    config.add_sns_topic("DownsampleDLQ",
                         names.downsample_dlq,
                         names.downsample_dlq,
                         [('lambda', Arn('DownsampleDLQLambda'))])

    config.add_lambda('DownsampleDLQLambda',
                      names.downsample_dlq,
                      lambda_role,
                      const.DOWNSAMPLE_DLQ_LAMBDA,
                      handler='index.handler',
                      timeout=10)

    config.add_lambda_permission('DownsampleDLQLambdaExecute',
                                 Ref('DownsampleDLQLambda'))

    return config