Code Example #1
File: copy_amis.py Project: reconstrue/boss-manage
def copy_amis(bosslet_config, ami_ending, new_ami_ending):
    """
    changes Route53 entry for the api in domain to use cloudfront for the s3 maintenance bucket.
    Args:
        session(Session): boto3 session object
        ami_ending(str): short hash attached to AMIs to copy from
        new_ami_ending(str): new post_name to assign AMI copies.

    Returns:
        Nothing
    """
    client = bosslet_config.session.client("ec2")
    for prefix in AMIS:
        prefix += bosslet_config.AMI_SUFFIX
        (ami_id, hash) = aws.ami_lookup(bosslet_config, prefix, version=ami_ending)
        print(str(ami_id))
        try:
            response = client.copy_image(
                SourceRegion=bosslet_config.REGION,
                SourceImageId=ami_id,
                Name=prefix + "-" + new_ami_ending,
                Description="Copied from ami id {}".format(ami_id))
            pprint.pprint(response)
        except Exception:
            traceback.print_exc()
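
As a rough usage sketch (not part of copy_amis.py): the driver script in Code Example #10 shows how a BossConfiguration object is loaded, and that object is what this version of the function expects. The import path and the bosslet name below are assumptions for illustration only.

# Hypothetical driver, assuming the boss-manage repo layout where the
# configuration module lives under lib/ and AMIS is defined in copy_amis.py.
from lib import configuration

bosslet_config = configuration.BossConfiguration('test.boss')  # placeholder bosslet
# Copy every AMI whose name ends with hash "h1234567" to a copy ending with "sprint99"
copy_amis(bosslet_config, 'h1234567', 'sprint99')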
Code Example #2
def create_config(session, domain, keypair=None, user_data=None, db_config={}):
    """Create the CloudFormationConfiguration object."""
    names = AWSNames(domain)
    config = CloudFormationConfiguration('proofreader', domain, const.REGION)

    vpc_id = config.find_vpc(session)
    az_subnets, _ = config.find_all_availability_zones(session)

    external_subnet_id = aws.subnet_id_lookup(session,
                                              names.subnet("external"))
    config.add_arg(
        Arg.Subnet("ExternalSubnet", external_subnet_id,
                   "ID of External Subnet to create resources in"))

    sgs = aws.sg_lookup_all(session, vpc_id)

    # Only allow insecure web access from APL until an ELB is configured for HTTPS
    config.add_security_group("HttpSecurityGroup", names.http,
                              [("tcp", "80", "80", const.INCOMING_SUBNET)])

    config.add_ec2_instance("ProofreaderWeb",
                            names.proofreader,
                            aws.ami_lookup(session, "proofreader-web.boss"),
                            keypair,
                            public_ip=True,
                            subnet=Ref("ExternalSubnet"),
                            security_groups=[
                                sgs[names.internal], sgs[names.ssh],
                                Ref('HttpSecurityGroup')
                            ],
                            user_data=user_data,
                            depends_on="ProofreaderDB"
                            )  # make sure the DB is launched before we start

    config.add_rds_db("ProofreaderDB",
                      names.proofreader_db,
                      db_config.get("port"),
                      db_config.get("name"),
                      db_config.get("user"),
                      db_config.get("password"),
                      az_subnets,
                      security_groups=[sgs[names.internal]])

    return config
Code Example #3
def vault_pipeline(bosslet_config, directory):
    internal_subnet = subnet_id_lookup(bosslet_config)

    names = bosslet_config.names
    s3_backup = "s3://" + names.backup.s3 + "/" + directory
    s3_log = "s3://" + names.backup.s3 + "/restore-logs/"
    cmd = "/usr/local/bin/python3 ~/vault.py restore {}".format(bosslet_config.INTERNAL_DOMAIN)

    _, data = list_s3_bucket(bosslet_config.session, names.backup.s3, directory + "/vault")
    if len(data) == 0:
        print("No vault data backed up on {}, skipping restore".format(directory))
        return None

    pipeline = DataPipeline(fmt="DP", log_uri = s3_log, resource_role="backup")
    pipeline.add_shell_command("VaultRestore",
                               cmd,
                               source = Ref("VaultBucket"),
                               runs_on = Ref("VaultInstance"))
    pipeline.add_ec2_instance("VaultInstance",
                              subnet = internal_subnet,
                              image = aws.ami_lookup(bosslet_config, names.backup.ami)[0])
    pipeline.add_s3_bucket("VaultBucket", s3_backup + "/vault")
    return pipeline
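
The returned DataPipeline is just a collection of pipeline objects; a caller still has to hand it to a CloudFormation config, as the test driver in Code Example #10 does with add_data_pipeline. A minimal sketch under that assumption (the stack name, logical name, and backup directory are placeholders):

# Sketch only: wire the restore pipeline into a CloudFormation stack.
pipeline = vault_pipeline(bosslet_config, 'backup.2020-01-01')   # placeholder directory
if pipeline is not None:
    config = CloudFormationConfiguration('vaultrestore', bosslet_config)
    config.add_data_pipeline('VaultRestorePipeline',
                             'vault-restore.' + bosslet_config.INTERNAL_DOMAIN,
                             pipeline.objects)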
Code Example #4
File: copy_amis.py Project: jhuapl-boss/boss-manage
def copy_amis(session, ami_ending, new_ami_ending):
    """
    changes Route53 entry for the api in domain to use cloudfront for the s3 maintenance bucket.
    Args:
        session(Session): boto3 session object
        ami_ending(str): short hash attached to AMIs to copy from
        new_ami_ending(str): new post_name to assign AMI copies.

    Returns:
        Nothing
    """
    client = session.client("ec2")
    for prefix in AMIS:
        (ami_id, hash) = aws.ami_lookup(session, prefix, version=ami_ending)
        print(str(ami_id))
        try:
            response = client.copy_image(SourceRegion=session.region_name,
                                         SourceImageId=ami_id,
                                         Name=prefix + "-" + new_ami_ending,
                                         Description="Copied from ami id {}".format(ami_id))
            pprint.pprint(response)
        except Exception:
            traceback.print_exc()
Code Example #5
def rds_pipeline(bosslet_config, directory, component, rds_name):
    names = bosslet_config.names
    subnet = subnet_id_lookup(bosslet_config)

    s3_backup = "s3://" + names.backup.s3 + "/" + directory
    s3_log = "s3://" + names.backup.s3 + "/restore-logs/"


    _, data = list_s3_bucket(bosslet_config.session, names.backup.s3, directory + "/RDS/" + rds_name)
    if len(data) == 0:
        print("No {} table data backed up on {}, skipping restore".format(component, directory))
        return None

    pipeline = DataPipeline(fmt="DP", log_uri = s3_log, resource_role="backup")
    pipeline.add_shell_command("RDSRestore",
                               "bash ~/rds.sh restore {}".format(rds_name),
                               source = Ref("RDSBucket"),
                               runs_on = Ref("RDSInstance"))
    pipeline.add_ec2_instance("RDSInstance",
                              subnet = subnet,
                              image = aws.ami_lookup(bosslet_config, names.backup.ami)[0])
    pipeline.add_s3_bucket("RDSBucket", s3_backup + "/RDS/" + rds_name)

    return pipeline
Code Example #6
File: cachedb.py Project: reconstrue/boss-manage
def create_config(bosslet_config, user_data=None):
    """
    Create the CloudFormationConfiguration object.
    Args:
        bosslet_config (BossConfiguration): target bosslet
        user_data (UserData): information used by the endpoint instance and vault.  Data will be run through the CloudFormation Fn::Join template intrinsic function so other template intrinsic functions used in the user_data will be parsed and executed.

    Returns: the config for the Cloud Formation stack

    """

    # Prepare user data for parsing by CloudFormation.
    if user_data is not None:
        parsed_user_data = { "Fn::Join" : ["", user_data.format_for_cloudformation()]}
    else:
        parsed_user_data = user_data

    keypair = bosslet_config.SSH_KEY
    session = bosslet_config.session
    names = bosslet_config.names
    config = CloudFormationConfiguration("cachedb", bosslet_config)

    vpc_id = config.find_vpc()

    #####
    # TODO: When CF config files are refactored for multi-account support
    #       the creation of _all_ subnets should be moved into core.
    #       AWS doesn't charge for the VPC or subnets, so it doesn't
    #       increase cost and cleans up subnet creation

    # Create several subnets for all the lambdas to use.
    internal_route_table_id = aws.rt_lookup(session, vpc_id, names.internal.rt)

    lambda_subnets = config.add_all_lambda_subnets()
    for lambda_subnet in lambda_subnets:
        key = lambda_subnet['Ref']
        config.add_route_table_association(key + "RTA",
                                           internal_route_table_id,
                                           lambda_subnet)

    # Create a custom resource to help delete ENIs from lambdas
    # DP NOTE: After deleting a lambda the ENIs may stick around for a while, causing the stack delete to fail
    #          See https://stackoverflow.com/a/41310289
    config.add_arg(Arg.String('StackName', config.stack_name))
    config.add_arg(Arg.String('DeleteENILambda', aws.lambda_arn_lookup(session, names.delete_eni.lambda_)))
    config.add_custom_resource('DeleteENI', 'DeleteENI', Ref('DeleteENILambda'), StackName = Ref('StackName'))

    # Lookup the External Subnet, Internal Security Group IDs that are
    # needed by other resources
    internal_subnet_id = aws.subnet_id_lookup(session, names.internal.subnet)
    config.add_arg(Arg.Subnet("InternalSubnet",
                              internal_subnet_id,
                              "ID of Internal Subnet to create resources in"))

    internal_sg_id = aws.sg_lookup(session, vpc_id, names.internal.sg)
    config.add_arg(Arg.SecurityGroup("InternalSecurityGroup",
                                     internal_sg_id,
                                     "ID of internal Security Group"))

    role = aws.role_arn_lookup(session, "lambda_cache_execution")
    config.add_arg(Arg.String("LambdaCacheExecutionRole", role,
                              "IAM role for " + names.multi_lambda.lambda_))

    cuboid_import_role = aws.role_arn_lookup(session, CUBOID_IMPORT_ROLE)
    config.add_arg(Arg.String(CUBOID_IMPORT_ROLE, cuboid_import_role,
                              "IAM role for cuboidImport"))

    config.add_capabilities(['CAPABILITY_IAM'])
 
    cuboid_bucket_name = names.cuboid_bucket.s3
    if not aws.s3_bucket_exists(session, cuboid_bucket_name):
        config.add_s3_bucket("cuboidBucket", cuboid_bucket_name)

    config.add_s3_bucket_policy(
        "cuboidBucketPolicy", cuboid_bucket_name,
        ['s3:GetObject', 's3:PutObject'],
        { 'AWS': role})
    config.append_s3_bucket_policy(
        "cuboidBucketPolicy", cuboid_bucket_name,
        ['s3:PutObject'], { 'AWS': cuboid_import_role})

    delete_bucket_name = names.delete_bucket.s3
    if not aws.s3_bucket_exists(session, delete_bucket_name):
        config.add_s3_bucket("deleteBucket", delete_bucket_name)
    config.add_s3_bucket_policy(
        "deleteBucketPolicy", delete_bucket_name,
        ['s3:GetObject', 's3:PutObject'],
        { 'AWS': role})

    tile_bucket_name = names.tile_bucket.s3
    if not aws.s3_bucket_exists(session, tile_bucket_name):
        life_cycle_cfg = get_cf_bucket_life_cycle_rules()
        config.add_s3_bucket(
            "tileBucket", tile_bucket_name, life_cycle_config=life_cycle_cfg)

    config.add_s3_bucket_policy(
        "tileBucketPolicy", tile_bucket_name,
        ['s3:GetObject', 's3:PutObject'],
        { 'AWS': role})

    # The ingest bucket is a staging area for cuboids uploaded during volumetric ingest.
    creating_ingest_bucket = False
    ingest_bucket_name = names.ingest_bucket.s3
    if not aws.s3_bucket_exists(session, ingest_bucket_name):
        creating_ingest_bucket = True
        ing_bucket_life_cycle_cfg = get_cf_bucket_life_cycle_rules()
        config.add_s3_bucket("ingestBucket", ingest_bucket_name,
            life_cycle_config=ing_bucket_life_cycle_cfg)

    config.add_s3_bucket_policy(
        "ingestBucketPolicy", ingest_bucket_name,
        ['s3:GetObject', 's3:PutObject', 's3:PutObjectTagging'],
        { 'AWS': cuboid_import_role})

    config.add_ec2_instance("CacheManager",
                            names.cachemanager.dns,
                            aws.ami_lookup(bosslet_config, names.cachemanager.ami),
                            keypair,
                            subnet=Ref("InternalSubnet"),
                            public_ip=False,
                            type_=const.CACHE_MANAGER_TYPE,
                            security_groups=[Ref("InternalSecurityGroup")],
                            user_data=parsed_user_data,
                            role="cachemanager")

    config.add_sqs_queue(
        names.ingest_cleanup_dlq.sqs, names.ingest_cleanup_dlq.sqs, 30, 20160)
    config.add_sqs_queue(
        names.cuboid_import_dlq.sqs, names.cuboid_import_dlq.sqs, 30, 20160)

    config.add_sqs_policy('cuboidImportDlqPolicy', 'cuboidImportDlqPolicy',
        [Ref(names.cuboid_import_dlq.sqs)], cuboid_import_role)

    config.add_lambda("MultiLambda",
                      names.multi_lambda.lambda_,
                      Ref("LambdaCacheExecutionRole"),
                      handler='lambda_loader.handler',
                      timeout=120,
                      memory=1536,
                      security_groups=[Ref('InternalSecurityGroup')],
                      subnets=lambda_subnets)
    config.add_lambda("TileUploadedLambda",
                      names.tile_uploaded.lambda_,
                      Ref("LambdaCacheExecutionRole"),
                      handler='tile_uploaded_lambda.handler',
                      timeout=5,
                      memory=1024)
    config.add_lambda("TileIngestLambda",
                      names.tile_ingest.lambda_,
                      Ref("LambdaCacheExecutionRole"),
                      handler='tile_ingest_lambda.handler',
                      timeout=30,
                      memory=1536)
    config.add_lambda("DeleteTileObjsLambda",
                      names.delete_tile_objs.lambda_,
                      Ref("LambdaCacheExecutionRole"),
                      handler='delete_tile_objs_lambda.handler',
                      timeout=90,
                      memory=128,
                      dlq=Arn(names.ingest_cleanup_dlq.sqs))
    config.add_lambda("DeleteTileEntryLambda",
                      names.delete_tile_index_entry.lambda_,
                      Ref("LambdaCacheExecutionRole"),
                      handler='delete_tile_index_entry_lambda.handler',
                      timeout=90,
                      memory=128,
                      dlq=Arn(names.ingest_cleanup_dlq.sqs))
    config.add_lambda("CuboidImportLambda",
                      names.cuboid_import_lambda.lambda_,
                      Ref(CUBOID_IMPORT_ROLE),
                      handler='cuboid_import_lambda.handler',
                      timeout=90,
                      memory=128,
                      dlq=Arn(names.cuboid_import_dlq.sqs))
    config.add_lambda("VolumetricIngestLambda",
                      names.volumetric_ingest_queue_upload_lambda.lambda_,
                      Ref("LambdaCacheExecutionRole"),
                      handler='ingest_queue_upload_volumetric_lambda.handler',
                      timeout=120,
                      memory=1024)

    if creating_ingest_bucket:
        config.add_lambda_permission(
            'ingestBucketInvokeCuboidImportLambda', names.cuboid_import_lambda.lambda_,
            principal='s3.amazonaws.com', source={
                'Fn::Join': [':', ['arn', 'aws', 's3', '', '', ingest_bucket_name]]}, #DP TODO: move into constants
            depends_on=['ingestBucket', 'CuboidImportLambda']
        )
    else:
        # NOTE: this permission doesn't seem to apply properly when doing a
        # CloudFormation update.  During testing, I had to manually apply this
        # permission before the bucket trigger could be applied in post_init().
        # Doing a CloudFormation delete followed by a create did not have a
        # problem.
        config.add_lambda_permission(
            'ingestBucketInvokeCuboidImportLambda', names.cuboid_import_lambda.lambda_,
            principal='s3.amazonaws.com', source={
                'Fn::Join': [':', ['arn', 'aws', 's3', '', '', ingest_bucket_name]]},
            depends_on='CuboidImportLambda'
        )

    # Add a topic indicating that the object store has been write locked.
    # Now using the "production mailing list" instead of a separate write lock topic.
    #config.add_sns_topic('WriteLock',
    #                     names.write_lock_topic,
    #                     names.write_lock,
    #                     []) # TODO: add subscribers

    return config
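
A hedged usage sketch for this create_config: the UserData class and the keys below are borrowed from the activities config in Code Example #7, and the module path is an assumption; which keys the cachemanager instance actually reads is not shown in this excerpt.

# Illustration only; keys and module path are assumptions, not the real caller.
from lib.userdata import UserData   # assumed module path in boss-manage

user_data = UserData()
user_data["system"]["fqdn"] = bosslet_config.names.cachemanager.dns
user_data["system"]["type"] = "cachemanager"   # placeholder type string

config = create_config(bosslet_config, user_data=user_data)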
Code Example #7
def create_config(bosslet_config, lookup=True):
    """Create the CloudFormationConfiguration object."""
    config = CloudFormationConfiguration('activities', bosslet_config)
    names = bosslet_config.names
    keypair = bosslet_config.SSH_KEY
    session = bosslet_config.session

    vpc_id = config.find_vpc()
    sgs = aws.sg_lookup_all(session, vpc_id)
    internal_subnets, _ = config.find_all_subnets()
    internal_subnets_asg, _ = config.find_all_subnets(compatibility='asg')

    topic_arn = aws.sns_topic_lookup(session, bosslet_config.ALERT_TOPIC)
    if topic_arn is None:
        raise MissingResourceError('SNS topic', bosslet_config.ALERT_TOPIC)

    event_data = {
        "lambda-name": "delete_lambda",
        "db": names.endpoint_db.rds,
        "meta-db": names.meta.ddb,
        "s3-index-table": names.s3_index.ddb,
        "id-index-table": names.id_index.ddb,
        "id-count-table": names.id_count_index.ddb,
        "cuboid_bucket": names.cuboid_bucket.s3,
        "delete_bucket": names.delete_bucket.s3,
        "topic-arn": topic_arn,
        "query-deletes-sfn-name": names.query_deletes.sfn,
        "delete-sfn-name": names.delete_cuboid.sfn,
        "delete-exp-sfn-name": names.delete_experiment.sfn,
        "delete-coord-frame-sfn-name": names.delete_coord_frame.sfn,
        "delete-coll-sfn-name": names.delete_collection.sfn,
    }

    role_arn = aws.role_arn_lookup(session, "events_for_delete_lambda")
    multi_lambda = names.multi_lambda.lambda_
    if lookup:
        lambda_arn = aws.lambda_arn_lookup(session, multi_lambda)
    else:
        lambda_arn = None
    target_list = [{
        "Arn": lambda_arn,
        "Id": multi_lambda,
        "Input": json.dumps(event_data)
    }]
    schedule_expression = "cron(1 6-11/1 ? * TUE-FRI *)"
    #schedule_expression = "cron(0/2 * * * ? *)"  # testing fire every two minutes

    config.add_event_rule("DeleteEventRule",
                          # XXX What type for event rules?
                          names.delete_event_rule.dns,
                          role_arn=role_arn,
                          schedule_expression=schedule_expression,
                          target_list=target_list,
                          state='DISABLED')   # Disabled until new delete is finished.

    # Events have to be given permission to run lambda.
    config.add_lambda_permission('DeleteRulePerm',
                                 multi_lambda,
                                 principal='events.amazonaws.com',
                                 source=Arn('DeleteEventRule'))
    user_data = UserData()
    user_data["system"]["fqdn"] = names.activities.dns
    user_data["system"]["type"] = "activities"
    user_data["aws"]["db"] = names.endpoint_db.rds
    user_data["aws"]["cache"] = names.cache.redis
    user_data["aws"]["cache-state"] = names.cache_state.redis
    user_data["aws"]["cache-db"] = "0"
    user_data["aws"]["cache-state-db"] = "0"
    user_data["aws"]["meta-db"] = names.meta.ddb
    user_data["aws"]["cuboid_bucket"] = names.cuboid_bucket.s3
    user_data["aws"]["tile_bucket"] = names.tile_bucket.s3
    user_data["aws"]["ingest_bucket"] = names.ingest_bucket.s3
    user_data["aws"]["s3-index-table"] = names.s3_index.ddb
    user_data["aws"]["tile-index-table"] = names.tile_index.ddb
    user_data["aws"]["id-index-table"] = names.id_index.ddb
    user_data["aws"]["id-count-table"] = names.id_count_index.ddb
    user_data["aws"]["max_task_id_suffix"] = str(const.MAX_TASK_ID_SUFFIX)
    user_data["aws"]["tile_ingest_lambda"] = names.tile_ingest.lambda_
    user_data["aws"]["tile_uploaded_lambda"] = names.tile_uploaded.lambda_

    config.add_autoscale_group("Activities",
                               names.activities.dns,
                               aws.ami_lookup(bosslet_config, names.activities.ami),
                               keypair,
                               subnets=internal_subnets_asg,
                               type_=const.ACTIVITIES_TYPE,
                               security_groups=[sgs[names.internal.sg]],
                               user_data=str(user_data),
                               role=aws.instance_profile_arn_lookup(session, "activities"),
                               min=1,
                               max=1)

    config.add_lambda("IngestLambda",
                      names.ingest_lambda.lambda_,
                      aws.role_arn_lookup(session, 'IngestQueueUpload'),
                      const.INGEST_LAMBDA,
                      handler="index.handler",
                      timeout=60 * 5,
                      runtime='python3.6',
                      memory=3008)

    config.add_lambda_permission("IngestLambdaExecute", Ref("IngestLambda"))


    # Downsample / Resolution Hierarchy support
    lambda_role = aws.role_arn_lookup(session, "lambda_resolution_hierarchy")

    config.add_lambda("DownsampleVolumeLambda",
                      names.downsample_volume.lambda_,
                      lambda_role,
                      handler="downsample_volume.handler",
                      timeout=120,
                      memory=1024,
                      dlq = Ref('DownsampleDLQ'))

    start_sfn_lambda_role = aws.role_arn_lookup(session, 'StartStepFcnLambdaRole')
    config.add_lambda("startSfnLambda",
               names.start_sfn.lambda_,
               start_sfn_lambda_role,
               handler="start_sfn_lambda.handler",
               timeout=60,
               memory=128)

    # This dead letter queue behavior uses a lambda to put failed lambda
    # executions into a DLQ created specifically for each downsample job.
    # There is a separate DLQ for each resolution.
    config.add_sns_topic("DownsampleDLQ",
                         names.downsample_dlq.sns,
                         names.downsample_dlq.sns,
                         [('lambda', Arn('DownsampleDLQLambda'))])

    config.add_lambda('DownsampleDLQLambda',
                      names.downsample_dlq.lambda_,
                      lambda_role,
                      const.DOWNSAMPLE_DLQ_LAMBDA,
                      handler='index.handler',
                      runtime='python3.7',
                      timeout=10)

    config.add_lambda_permission('DownsampleDLQLambdaExecute',
                                 Ref('DownsampleDLQLambda'))

    return config
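
The DeleteEventRule target serializes event_data into the rule's Input field, and CloudWatch Events delivers that JSON as the invoked lambda's event. A minimal sketch of the receiving side (this handler is illustrative only, not the actual delete lambda):

# Illustrative handler only: shows how the Input payload built above would be read.
def handler(event, context):
    delete_sfn = event["delete-sfn-name"]          # names.delete_cuboid.sfn
    query_sfn = event["query-deletes-sfn-name"]    # names.query_deletes.sfn
    cuboid_bucket = event["cuboid_bucket"]         # names.cuboid_bucket.s3
    topic_arn = event["topic-arn"]                 # SNS alert topic looked up above
    # ...the real lambda would start the delete step functions using these names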
Code Example #8
File: cachedb.py Project: jhuapl-boss/boss-manage
def create_config(session, domain, keypair=None, user_data=None):
    """
    Create the CloudFormationConfiguration object.
    Args:
        session: amazon session object
        domain: domain of the stack being created
        keypair: keypair used to by instances being created
        user_data (UserData): information used by the endpoint instance and vault.  Data will be run through the CloudFormation Fn::Join template intrinsic function so other template intrinsic functions used in the user_data will be parsed and executed.

    Returns: the config for the Cloud Formation stack

    """

    # Prepare user data for parsing by CloudFormation.
    if user_data is not None:
        parsed_user_data = { "Fn::Join" : ["", user_data.format_for_cloudformation()]}
    else:
        parsed_user_data = user_data

    names = AWSNames(domain)
    config = CloudFormationConfiguration("cachedb", domain, const.REGION)

    vpc_id = config.find_vpc(session)

    # Create several subnets for all the lambdas to use.
    lambda_azs = aws.azs_lookup(session, lambda_compatible_only=True)
    internal_route_table_id = aws.rt_lookup(session, vpc_id, names.internal)

    print("AZs for lambda: " + str(lambda_azs))
    lambda_subnets = []
    for i in range(const.LAMBDA_SUBNETS):
        key = 'LambdaSubnet{}'.format(i)
        lambda_subnets.append(Ref(key))
        config.add_subnet(key, names.subnet('lambda{}'.format(i)), az=lambda_azs[i % len(lambda_azs)][0])
        config.add_route_table_association(key + "RTA",
                                           internal_route_table_id,
                                           Ref(key))

    # Lookup the External Subnet, Internal Security Group IDs that are
    # needed by other resources
    internal_subnet_id = aws.subnet_id_lookup(session, names.subnet("internal"))
    config.add_arg(Arg.Subnet("InternalSubnet",
                              internal_subnet_id,
                              "ID of Internal Subnet to create resources in"))

    internal_sg_id = aws.sg_lookup(session, vpc_id, names.internal)
    config.add_arg(Arg.SecurityGroup("InternalSecurityGroup",
                                     internal_sg_id,
                                     "ID of internal Security Group"))

    role = aws.role_arn_lookup(session, "lambda_cache_execution")
    config.add_arg(Arg.String("LambdaCacheExecutionRole", role,
                              "IAM role for multilambda." + domain))

    cuboid_import_role = aws.role_arn_lookup(session, CUBOID_IMPORT_ROLE)
    config.add_arg(Arg.String(CUBOID_IMPORT_ROLE, cuboid_import_role,
                              "IAM role for cuboidImport." + domain))

    config.add_capabilities(['CAPABILITY_IAM'])
 
    # Allow updating S3 index table with cuboid's object key during
    # volumetric ingest.
    # Example of s3_index_arn form: arn:aws:dynamodb:us-east-1:12345678:table/s3index.*.boss
    config.add_iam_policy_to_role(
        'S3IndexPutItem{}'.format(domain).replace('.', ''),
        get_s3_index_arn(session, domain).replace(domain,'*.') + domain.split('.')[1],
        [CUBOID_IMPORT_ROLE], ['dynamodb:PutItem'])

    cuboid_bucket_name = names.cuboid_bucket
    if not aws.s3_bucket_exists(session, cuboid_bucket_name):
        config.add_s3_bucket("cuboidBucket", cuboid_bucket_name)
    config.add_s3_bucket_policy(
        "cuboidBucketPolicy", cuboid_bucket_name,
        ['s3:GetObject', 's3:PutObject'],
        { 'AWS': role})
    config.append_s3_bucket_policy(
        "cuboidBucketPolicy", cuboid_bucket_name,
        ['s3:PutObject'], { 'AWS': cuboid_import_role})

    delete_bucket_name = names.delete_bucket
    if not aws.s3_bucket_exists(session, delete_bucket_name):
        config.add_s3_bucket("deleteBucket", delete_bucket_name)
    config.add_s3_bucket_policy(
        "deleteBucketPolicy", delete_bucket_name,
        ['s3:GetObject', 's3:PutObject'],
        { 'AWS': role})

    tile_bucket_name = names.tile_bucket
    if not aws.s3_bucket_exists(session, tile_bucket_name):
        life_cycle_cfg = get_cf_bucket_life_cycle_rules()
        config.add_s3_bucket(
            "tileBucket", tile_bucket_name, life_cycle_config=life_cycle_cfg)

    config.add_s3_bucket_policy(
        "tileBucketPolicy", tile_bucket_name,
        ['s3:GetObject', 's3:PutObject'],
        { 'AWS': role})

    # The ingest bucket is a staging area for cuboids uploaded during volumetric ingest.
    creating_ingest_bucket = False
    ingest_bucket_name = names.ingest_bucket
    if not aws.s3_bucket_exists(session, ingest_bucket_name):
        creating_ingest_bucket = True
        ing_bucket_life_cycle_cfg = get_cf_bucket_life_cycle_rules()
        config.add_s3_bucket("ingestBucket", ingest_bucket_name,
            life_cycle_config=ing_bucket_life_cycle_cfg)

    config.add_s3_bucket_policy(
        "ingestBucketPolicy", ingest_bucket_name,
        ['s3:GetObject', 's3:PutObject', 's3:PutObjectTagging'],
        { 'AWS': cuboid_import_role})

    config.add_ec2_instance("CacheManager",
                                names.cache_manager,
                                aws.ami_lookup(session, "cachemanager.boss"),
                                keypair,
                                subnet=Ref("InternalSubnet"),
                                public_ip=False,
                                type_=const.CACHE_MANAGER_TYPE,
                                security_groups=[Ref("InternalSecurityGroup")],
                                user_data=parsed_user_data,
                                role="cachemanager")

    config.add_sqs_queue(
        names.ingest_cleanup_dlq, names.ingest_cleanup_dlq, 30, 20160)
    config.add_sqs_queue(
        names.cuboid_import_dlq, names.cuboid_import_dlq, 30, 20160)

    config.add_sqs_policy('cuboidImportDlqPolicy', 'cuboidImportDlqPolicy',
        [Ref(names.cuboid_import_dlq)], cuboid_import_role)

    lambda_bucket = aws.get_lambda_s3_bucket(session)
    config.add_lambda("MultiLambda",
                      names.multi_lambda,
                      Ref("LambdaCacheExecutionRole"),
                      s3=(lambda_bucket,
                          "multilambda.{}.zip".format(domain),
                          "lambda_loader.handler"),
                      timeout=120,
                      memory=1536,
                      security_groups=[Ref('InternalSecurityGroup')],
                      subnets=lambda_subnets,
                      runtime='python3.6')
    config.add_lambda("TileUploadedLambda",
                      names.tile_uploaded_lambda,
                      Ref("LambdaCacheExecutionRole"),
                      s3=(lambda_bucket,
                          "multilambda.{}.zip".format(domain),
                          "tile_uploaded_lambda.handler"),
                      timeout=5,
                      memory=1024,
                      runtime='python3.6')
    config.add_lambda("TileIngestLambda",
                      names.tile_ingest_lambda,
                      Ref("LambdaCacheExecutionRole"),
                      s3=(lambda_bucket,
                          "multilambda.{}.zip".format(domain),
                          "tile_ingest_lambda.handler"),
                      timeout=30,
                      memory=1536,
                      runtime='python3.6')
    config.add_lambda("DeleteTileObjsLambda",
                      names.delete_tile_objs_lambda,
                      Ref("LambdaCacheExecutionRole"),
                      s3=(lambda_bucket,
                          "multilambda.{}.zip".format(domain),
                          "delete_tile_objs_lambda.handler"),
                      timeout=90,
                      memory=128,
                      runtime='python3.6',
                      dlq=Arn(names.ingest_cleanup_dlq))
    config.add_lambda("DeleteTileEntryLambda",
                      names.delete_tile_index_entry_lambda,
                      Ref("LambdaCacheExecutionRole"),
                      s3=(lambda_bucket,
                          "multilambda.{}.zip".format(domain),
                          "delete_tile_index_entry_lambda.handler"),
                      timeout=90,
                      memory=128,
                      runtime='python3.6',
                      dlq=Arn(names.ingest_cleanup_dlq))
    config.add_lambda("CuboidImportLambda",
                      names.cuboid_import_lambda,
                      Ref(CUBOID_IMPORT_ROLE),
                      s3=(lambda_bucket,
                          "multilambda.{}.zip".format(domain),
                          "cuboid_import_lambda.handler"),
                      timeout=90,
                      memory=128,
                      runtime='python3.6',
                      dlq=Arn(names.cuboid_import_dlq))
    config.add_lambda("VolumetricIngestLambda",
                      names.volumetric_ingest_queue_upload_lambda,
                      Ref("LambdaCacheExecutionRole"),
                      s3=(lambda_bucket,
                          "multilambda.{}.zip".format(domain),
                          "ingest_queue_upload_volumetric_lambda.handler"),
                      timeout=120,
                      memory=1024,
                      runtime='python3.6')

    if creating_ingest_bucket:
        config.add_lambda_permission(
            'ingestBucketInvokeCuboidImportLambda', names.cuboid_import_lambda,
            principal='s3.amazonaws.com', source={
                'Fn::Join': [':', ['arn', 'aws', 's3', '', '', ingest_bucket_name]]}, #DP TODO: move into constants
            depends_on=['ingestBucket', 'CuboidImportLambda']
        )
    else:
        # NOTE: this permission doesn't seem to apply properly when doing a
        # CloudFormation update.  During testing, I had to manually apply this
        # permission before the bucket trigger could be applied in post_init().
        # Doing a CloudFormation delete followed by a create did not have a
        # problem.
        config.add_lambda_permission(
            'ingestBucketInvokeCuboidImportLambda', names.cuboid_import_lambda,
            principal='s3.amazonaws.com', source={
                'Fn::Join': [':', ['arn', 'aws', 's3', '', '', ingest_bucket_name]]},
            depends_on='CuboidImportLambda'
        )
    # Add a topic indicating that the object store has been write locked.
    # Now using the "production mailing list" instead of a separate write lock topic.
    #config.add_sns_topic('WriteLock',
    #                     names.write_lock_topic,
    #                     names.write_lock,
    #                     []) # TODO: add subscribers

    return config
Code Example #9
File: cachedb.py Project: lrodri29/boss-manage
def create_config(session, domain, keypair=None, user_data=None):
    """
    Create the CloudFormationConfiguration object.
    Args:
        session: amazon session object
        domain: domain of the stack being created
        keypair: keypair used to by instances being created
        user_data (UserData): information used by the endpoint instance and vault.  Data will be run through the CloudFormation Fn::Join template intrinsic function so other template intrinsic functions used in the user_data will be parsed and executed.

    Returns: the config for the Cloud Formation stack

    """

    # Prepare user data for parsing by CloudFormation.
    if user_data is not None:
        parsed_user_data = {
            "Fn::Join": ["", user_data.format_for_cloudformation()]
        }
    else:
        parsed_user_data = user_data

    names = AWSNames(domain)
    config = CloudFormationConfiguration("cachedb", domain, const.REGION)

    vpc_id = config.find_vpc(session)

    # Create several subnets for all the lambdas to use.
    lambda_azs = aws.azs_lookup(session, lambda_compatible_only=True)
    internal_route_table_id = aws.rt_lookup(session, vpc_id, names.internal)

    print("AZs for lambda: " + str(lambda_azs))
    lambda_subnets = []
    for i in range(const.LAMBDA_SUBNETS):
        key = 'LambdaSubnet{}'.format(i)
        lambda_subnets.append(Ref(key))
        config.add_subnet(key,
                          names.subnet('lambda{}'.format(i)),
                          az=lambda_azs[i % len(lambda_azs)][0])
        config.add_route_table_association(key + "RTA",
                                           internal_route_table_id, Ref(key))

    # Lookup the External Subnet, Internal Security Group IDs that are
    # needed by other resources
    internal_subnet_id = aws.subnet_id_lookup(session,
                                              names.subnet("internal"))
    config.add_arg(
        Arg.Subnet("InternalSubnet", internal_subnet_id,
                   "ID of Internal Subnet to create resources in"))

    internal_sg_id = aws.sg_lookup(session, vpc_id, names.internal)
    config.add_arg(
        Arg.SecurityGroup("InternalSecurityGroup", internal_sg_id,
                          "ID of internal Security Group"))

    role = aws.role_arn_lookup(session, "lambda_cache_execution")
    config.add_arg(
        Arg.String("LambdaCacheExecutionRole", role,
                   "IAM role for multilambda." + domain))

    index_bucket_name = names.cuboid_bucket
    if not aws.s3_bucket_exists(session, index_bucket_name):
        config.add_s3_bucket("cuboidBucket", index_bucket_name)
    config.add_s3_bucket_policy("cuboidBucketPolicy", index_bucket_name,
                                ['s3:GetObject', 's3:PutObject'],
                                {'AWS': role})

    delete_bucket_name = names.delete_bucket
    if not aws.s3_bucket_exists(session, delete_bucket_name):
        config.add_s3_bucket("deleteBucket", delete_bucket_name)
    config.add_s3_bucket_policy("deleteBucketPolicy", delete_bucket_name,
                                ['s3:GetObject', 's3:PutObject'],
                                {'AWS': role})

    creating_tile_bucket = False
    tile_bucket_name = names.tile_bucket
    if not aws.s3_bucket_exists(session, tile_bucket_name):
        creating_tile_bucket = True
        config.add_s3_bucket("tileBucket", tile_bucket_name)

    config.add_s3_bucket_policy("tileBucketPolicy", tile_bucket_name,
                                ['s3:GetObject', 's3:PutObject'],
                                {'AWS': role})

    ingest_bucket_name = names.ingest_bucket
    if not aws.s3_bucket_exists(session, ingest_bucket_name):
        config.add_s3_bucket("ingestBucket", ingest_bucket_name)
    config.add_s3_bucket_policy("ingestBucketPolicy", ingest_bucket_name,
                                ['s3:GetObject', 's3:PutObject'],
                                {'AWS': role})

    config.add_ec2_instance("CacheManager",
                            names.cache_manager,
                            aws.ami_lookup(session, "cachemanager.boss"),
                            keypair,
                            subnet=Ref("InternalSubnet"),
                            public_ip=False,
                            type_=const.CACHE_MANAGER_TYPE,
                            security_groups=[Ref("InternalSecurityGroup")],
                            user_data=parsed_user_data,
                            role="cachemanager")

    lambda_bucket = aws.get_lambda_s3_bucket(session)
    config.add_lambda("MultiLambda",
                      names.multi_lambda,
                      Ref("LambdaCacheExecutionRole"),
                      s3=(lambda_bucket,
                          "multilambda.{}.zip".format(domain),
                          "lambda_loader.handler"),
                      timeout=120,
                      memory=1024,
                      security_groups=[Ref('InternalSecurityGroup')],
                      subnets=lambda_subnets,
                      runtime='python3.6')

    if creating_tile_bucket:
        config.add_lambda_permission(
            'tileBucketInvokeMultiLambda',
            names.multi_lambda,
            principal='s3.amazonaws.com',
            source={
                'Fn::Join':
                [':', ['arn', 'aws', 's3', '', '', tile_bucket_name]]
            },  #DP TODO: move into constants
            depends_on=['tileBucket', 'MultiLambda'])
    else:
        config.add_lambda_permission(
            'tileBucketInvokeMultiLambda',
            names.multi_lambda,
            principal='s3.amazonaws.com',
            source={
                'Fn::Join':
                [':', ['arn', 'aws', 's3', '', '', tile_bucket_name]]
            },
            depends_on='MultiLambda')

    # Add a topic indicating that the object store has been write locked.
    # Now using the "production mailing list" instead of a separate write lock topic.
    #config.add_sns_topic('WriteLock',
    #                     names.write_lock_topic,
    #                     names.write_lock,
    #                     []) # TODO: add subscribers

    return config
Code Example #10
                                   objectIds=resp['ids'])
    errors = [
        field['stringValue'] for obj in resp['pipelineObjects']
        for field in obj['fields'] if field['key'] == '@failureReason'
    ]
    return errors


bosslet_config = configuration.BossConfiguration('test.boss')
config = cloudformation.CloudFormationConfiguration('test', bosslet_config)

config.add_vpc()
internal_subnets, external_subnets = config.add_all_subnets()  # Add one subnet per AZ

ami = aws.ami_lookup(bosslet_config, const.BASTION_AMI)[0]

pipeline = datapipeline.DataPipeline()
for subnet in internal_subnets:
    AZ = subnet['Ref'][0]
    pipeline.add_shell_command(AZ + "TestCommand",
                               "/bin/echo this is a test",
                               runs_on=datapipeline.Ref(AZ + "TestInstance"))
    pipeline.add_ec2_instance(AZ + "TestInstance",
                              subnet=subnet,
                              type='t1.micro',
                              image=ami)

config.add_data_pipeline("TestPipeline",
                         "test." + bosslet_config.INTERNAL_DOMAIN,
                         pipeline.objects)
Code Example #11
File: activities.py Project: jhuapl-boss/boss-manage
def create_config(session, domain):
    """Create the CloudFormationConfiguration object."""
    config = CloudFormationConfiguration('activities', domain, const.REGION)
    names = AWSNames(domain)

    global keypair
    keypair = aws.keypair_lookup(session)

    vpc_id = config.find_vpc(session)
    sgs = aws.sg_lookup_all(session, vpc_id)
    internal_subnets, _ = config.find_all_availability_zones(session)
    internal_subnets_lambda, _ = config.find_all_availability_zones(session, lambda_compatible_only=True)
    topic_arn = aws.sns_topic_lookup(session, "ProductionMicronsMailingList")
    event_data = {
        "lambda-name": "delete_lambda",
        "db": names.endpoint_db,
        "meta-db": names.meta,
        "s3-index-table": names.s3_index,
        "id-index-table": names.id_index,
        "id-count-table": names.id_count_index,
        "cuboid_bucket": names.cuboid_bucket,
        "delete_bucket": names.delete_bucket,
        "topic-arn": topic_arn,
        "query-deletes-sfn-name": names.query_deletes,
        "delete-sfn-name": names.delete_cuboid,
        "delete-exp-sfn-name": names.delete_experiment,
        "delete-coord-frame-sfn-name": names.delete_coord_frame,
        "delete-coll-sfn-name": names.delete_collection
    }

    role_arn = aws.role_arn_lookup(session, "events_for_delete_lambda")
    multi_lambda = names.multi_lambda
    lambda_arn = aws.lambda_arn_lookup(session, multi_lambda)
    target_list = [{
        "Arn": lambda_arn,
        "Id": multi_lambda,
        "Input": json.dumps(event_data)
    }]
    schedule_expression = "cron(1 6-11/1 ? * TUE-FRI *)"
    #schedule_expression = "cron(0/2 * * * ? *)"  # testing fire every two minutes

    config.add_event_rule("DeleteEventRule", names.delete_event_rule, role_arn=role_arn,
                          schedule_expression=schedule_expression, target_list=target_list, description=None)
    # Events have to be given permission to run lambda.
    config.add_lambda_permission('DeleteRulePerm', multi_lambda, principal='events.amazonaws.com',
                                 source=Arn('DeleteEventRule'))
    user_data = UserData()
    user_data["system"]["fqdn"] = names.activities
    user_data["system"]["type"] = "activities"
    user_data["aws"]["db"] = names.endpoint_db
    user_data["aws"]["cache"] = names.cache
    user_data["aws"]["cache-state"] = names.cache_state
    user_data["aws"]["cache-db"] = "0"
    user_data["aws"]["cache-state-db"] = "0"
    user_data["aws"]["meta-db"] = names.meta
    user_data["aws"]["cuboid_bucket"] = names.cuboid_bucket
    user_data["aws"]["tile_bucket"] = names.tile_bucket
    user_data["aws"]["ingest_bucket"] = names.ingest_bucket
    user_data["aws"]["s3-index-table"] = names.s3_index
    user_data["aws"]["tile-index-table"] = names.tile_index
    user_data["aws"]["id-index-table"] = names.id_index
    user_data["aws"]["id-count-table"] = names.id_count_index
    user_data["aws"]["max_task_id_suffix"] = str(const.MAX_TASK_ID_SUFFIX)

    config.add_autoscale_group("Activities",
                               names.activities,
                               aws.ami_lookup(session, 'activities.boss'),
                               keypair,
                               subnets=internal_subnets_lambda,
                               type_=const.ACTIVITIES_TYPE,
                               security_groups=[sgs[names.internal]],
                               user_data=str(user_data),
                               role=aws.instance_profile_arn_lookup(session, "activities"),
                               min=1,
                               max=1)

    config.add_lambda("IngestLambda",
                      names.ingest_lambda,
                      aws.role_arn_lookup(session, 'IngestQueueUpload'),
                      const.INGEST_LAMBDA,
                      handler="index.handler",
                      timeout=60 * 5,
                      memory=3008)

    config.add_lambda_permission("IngestLambdaExecute", Ref("IngestLambda"))


    # Downsample / Resolution Hierarchy support
    lambda_role = aws.role_arn_lookup(session, "lambda_resolution_hierarchy")

    config.add_lambda("DownsampleVolumeLambda",
                      names.downsample_volume_lambda,
                      lambda_role,
                      s3=(aws.get_lambda_s3_bucket(session),
                          "multilambda.{}.zip".format(domain),
                          "downsample_volume.handler"),
                      timeout=120,
                      memory=1024,
                      runtime='python3.6',
                      dlq = Ref('DownsampleDLQ'))

    config.add_sns_topic("DownsampleDLQ",
                         names.downsample_dlq,
                         names.downsample_dlq,
                         [('lambda', Arn('DownsampleDLQLambda'))])

    config.add_lambda('DownsampleDLQLambda',
                      names.downsample_dlq,
                      lambda_role,
                      const.DOWNSAMPLE_DLQ_LAMBDA,
                      handler='index.handler',
                      timeout=10)

    config.add_lambda_permission('DownsampleDLQLambdaExecute',
                                 Ref('DownsampleDLQLambda'))

    return config
Code Example #12
File: api.py Project: theiscoresearch/boss-manage
def create_config(session, domain, keypair=None, db_config={}):
    """
    Create the CloudFormationConfiguration object.
    Args:
        session: amazon session object
        domain (string): domain of the stack being created
        keypair: keypair used to by instances being created
        db_config (dict): information needed by rds

    Returns: the config for the Cloud Formation stack

    """

    names = AWSNames(domain)

    # Look up IAM Role and SNS Topic ARNs for use later in the config
    endpoint_role_arn = aws.role_arn_lookup(session, "endpoint")
    cachemanager_role_arn = aws.role_arn_lookup(session, 'cachemanager')
    dns_arn = aws.sns_topic_lookup(session, names.dns.replace(".", "-"))
    if dns_arn is None:
        raise Exception("SNS topic named dns." + domain + " does not exist.")

    mailing_list_arn = aws.sns_topic_lookup(session, const.PRODUCTION_MAILING_LIST)
    if mailing_list_arn is None:
        msg = "MailingList {} needs to be created before running config".format(const.PRODUCTION_MAILING_LIST)
        raise Exception(msg)

    # Configure Vault and create the user data config that the endpoint will
    # use for connecting to Vault and the DB instance
    user_data = UserData()
    user_data["system"]["fqdn"] = names.endpoint
    user_data["system"]["type"] = "endpoint"
    user_data["aws"]["db"] = names.endpoint_db
    user_data["aws"]["cache"] = names.cache
    user_data["aws"]["cache-state"] = names.cache_state

    ## cache-db and cache-state-db need to be in user_data for lambda to access them.
    user_data["aws"]["cache-db"] = "0"
    user_data["aws"]["cache-state-db"] = "0"
    user_data["aws"]["meta-db"] = names.meta

    # Use CloudFormation's Ref function so that queues' URLs are placed into
    # the Boss config file.
    user_data["aws"]["s3-flush-queue"] = str(Ref(names.s3flush_queue)) # str(Ref("S3FlushQueue")) DP XXX
    user_data["aws"]["s3-flush-deadletter-queue"] = str(Ref(names.deadletter_queue)) #str(Ref("DeadLetterQueue")) DP XXX
    user_data["aws"]["cuboid_bucket"] = names.cuboid_bucket
    user_data["aws"]["tile_bucket"] = names.tile_bucket
    user_data["aws"]["ingest_bucket"] = names.ingest_bucket
    user_data["aws"]["s3-index-table"] = names.s3_index
    user_data["aws"]["tile-index-table"] = names.tile_index
    user_data["aws"]["id-index-table"] = names.id_index
    user_data["aws"]["id-count-table"] = names.id_count_index
    user_data["aws"]["prod_mailing_list"] = mailing_list_arn

    user_data["auth"]["OIDC_VERIFY_SSL"] = 'True'
    user_data["lambda"]["flush_function"] = names.multi_lambda
    user_data["lambda"]["page_in_function"] = names.multi_lambda
    user_data["lambda"]["ingest_function"] = names.multi_lambda

    user_data['sfn']['populate_upload_queue'] = names.ingest_queue_populate
    user_data['sfn']['upload_sfn'] = names.ingest_queue_upload
    user_data['sfn']['downsample_sfn'] = names.resolution_hierarchy

    # Prepare user data for parsing by CloudFormation.
    parsed_user_data = { "Fn::Join" : ["", user_data.format_for_cloudformation()]}

    config = CloudFormationConfiguration('api', domain, const.REGION)

    vpc_id = config.find_vpc(session)
    az_subnets, external_subnets = config.find_all_availability_zones(session)
    az_subnets_lambda, external_subnets_lambda = config.find_all_availability_zones(session, lambda_compatible_only=True)
    sgs = aws.sg_lookup_all(session, vpc_id)

    # DP XXX: hack until we can get production updated correctly
    config.add_security_group('AllHTTPSSecurityGroup', 'https.' + domain, [('tcp', '443', '443', '0.0.0.0/0')])
    sgs[names.https] = Ref('AllHTTPSSecurityGroup')

    # Create SQS queues and apply access control policies.
    #config.add_sqs_queue("DeadLetterQueue", names.deadletter_queue, 30, 20160) DP XXX
    config.add_sqs_queue(names.deadletter_queue, names.deadletter_queue, 30, 20160)

    max_receives = 3
    #config.add_sqs_queue("S3FlushQueue", DP XXX
    config.add_sqs_queue(names.s3flush_queue,
                         names.s3flush_queue,
                         30,
                         dead=(Arn(names.deadletter_queue), max_receives))

    config.add_sqs_policy("sqsEndpointPolicy", 'sqsEndpointPolicy', # DP XXX
                          [Ref(names.deadletter_queue), Ref(names.s3flush_queue)],
                          endpoint_role_arn)

    config.add_sqs_policy("sqsCachemgrPolicy", 'sqsCachemgrPolicy', # DP XXX
                          [Ref(names.deadletter_queue), Ref(names.s3flush_queue)],
                          cachemanager_role_arn)

    # Create the endpoint ASG, ELB, and RDS instance
    config.add_autoscale_group("Endpoint",
                               names.endpoint,
                               aws.ami_lookup(session, "endpoint.boss"),
                               keypair,
                               subnets=az_subnets_lambda,
                               type_=const.ENDPOINT_TYPE,
                               security_groups=[sgs[names.internal]],
                               user_data=parsed_user_data,
                               min=const.ENDPOINT_CLUSTER_MIN,
                               max=const.ENDPOINT_CLUSTER_MAX,
                               elb=Ref("EndpointLoadBalancer"),
                               notifications=dns_arn,
                               role=aws.instance_profile_arn_lookup(session, 'endpoint'),
                               health_check_grace_period=90,
                               detailed_monitoring=True,
                               depends_on=["EndpointLoadBalancer", "EndpointDB"])

    cert = aws.cert_arn_lookup(session, names.public_dns("api"))
    config.add_loadbalancer("EndpointLoadBalancer",
                            names.endpoint_elb,
                            [("443", "80", "HTTPS", cert)],
                            subnets=external_subnets_lambda,
                            security_groups=[sgs[names.internal], sgs[names.https]],
                            public=True)

    # Endpoint servers are typically not CPU bound, so react quickly to load
    config.add_autoscale_policy("EndpointScaleUp",
                                Ref("Endpoint"),
                                adjustments=[
                                    (0.0, 10, 1),  # 12% - 22% Utilization add 1 instance
                                    (10, None, 2)  # Above 22% Utilization add 2 instances
                                ],
                                alarms=[
                                    ("CPUUtilization", "Maximum", "GreaterThanThreshold", "12")
                                ],
                                period=1)

    config.add_autoscale_policy("EndpointScaleDown",
                                Ref("Endpoint"),
                                adjustments=[
                                    (None, 0.0, -1),   # Under 1.5% Utilization remove 1 instance
                                ],
                                alarms=[
                                    ("CPUUtilization", "Average", "LessThanThreshold", "1.5")
                                ],
                                period=50)

    config.add_rds_db("EndpointDB",
                      names.endpoint_db,
                      db_config.get("port"),
                      db_config.get("name"),
                      db_config.get("user"),
                      db_config.get("password"),
                      az_subnets,
                      type_ = const.RDS_TYPE,
                      security_groups=[sgs[names.internal]])

    # Create the Meta, s3Index, tileIndex, annotation Dynamo tables
    with open(const.DYNAMO_METADATA_SCHEMA, 'r') as fh:
        dynamo_cfg = json.load(fh)
    config.add_dynamo_table_from_json("EndpointMetaDB", names.meta, **dynamo_cfg)

    with open(const.DYNAMO_S3_INDEX_SCHEMA, 'r') as s3fh:
        dynamo_s3_cfg = json.load(s3fh)
    config.add_dynamo_table_from_json('s3Index', names.s3_index, **dynamo_s3_cfg)  # DP XXX

    with open(const.DYNAMO_TILE_INDEX_SCHEMA, 'r') as tilefh:
        dynamo_tile_cfg = json.load(tilefh)
    config.add_dynamo_table_from_json('tileIndex', names.tile_index, **dynamo_tile_cfg)  # DP XXX

    with open(const.DYNAMO_ID_INDEX_SCHEMA, 'r') as id_ind_fh:
        dynamo_id_ind__cfg = json.load(id_ind_fh)
    config.add_dynamo_table_from_json('idIndIndex', names.id_index, **dynamo_id_ind__cfg)  # DP XXX

    with open(const.DYNAMO_ID_COUNT_SCHEMA, 'r') as id_count_fh:
        dynamo_id_count_cfg = json.load(id_count_fh)
    config.add_dynamo_table_from_json('idCountIndex', names.id_count_index, **dynamo_id_count_cfg)  # DP XXX

    return config
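
create_config() only reads db_config through .get(), so the keys it expects can be inferred from the add_rds_db call above. A hedged caller sketch follows; every value is a placeholder, and the real caller supplies these from its own secrets handling, which this excerpt does not show.

# Placeholder values for illustration only.
db_config = {
    "port": "3306",
    "name": "boss",        # database name; placeholder
    "user": "testuser",    # placeholder
    "password": "REPLACE_ME",
}
config = create_config(session, "test.boss", keypair="test-keypair", db_config=db_config)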
Code Example #13
File: core.py Project: jhuapl-boss/boss-manage
def create_config(session, domain):
    """Create the CloudFormationConfiguration object."""
    config = CloudFormationConfiguration('core', domain, const.REGION)
    names = AWSNames(domain)

    global keypair
    keypair = aws.keypair_lookup(session)

    config.add_vpc()

    # Create the internal and external subnets
    config.add_subnet('InternalSubnet', names.subnet('internal'))
    config.add_subnet('ExternalSubnet', names.subnet('external'))
    internal_subnets, external_subnets = config.add_all_azs(session)
    # It seems that both Lambdas and ASGs need lambda_compatible_only subnets.
    internal_subnets_lambda, external_subnets_lambda = config.add_all_azs(session, lambda_compatible_only=True)

    config.add_ec2_instance("Bastion",
                            names.bastion,
                            aws.ami_lookup(session, const.BASTION_AMI),
                            keypair,
                            subnet = Ref("ExternalSubnet"),
                            public_ip = True,
                            user_data = const.BASTION_USER_DATA,
                            security_groups = [Ref("InternalSecurityGroup"), Ref("BastionSecurityGroup")],
                            depends_on = "AttachInternetGateway")

    user_data = UserData()
    user_data["system"]["fqdn"] = names.consul
    user_data["system"]["type"] = "consul"
    user_data["consul"]["cluster"] = str(get_scenario(const.CONSUL_CLUSTER_SIZE))
    config.add_autoscale_group("Consul",
                               names.consul,
                               aws.ami_lookup(session, "consul.boss"),
                               keypair,
                               subnets = internal_subnets_lambda,
                               security_groups = [Ref("InternalSecurityGroup")],
                               user_data = str(user_data),
                               min = const.CONSUL_CLUSTER_SIZE,
                               max = const.CONSUL_CLUSTER_SIZE,
                               notifications = Ref("DNSSNS"),
                               role = aws.instance_profile_arn_lookup(session, 'consul'),
                               support_update = False, # Update will restart the instances manually
                               depends_on = ["DNSLambda", "DNSSNS", "DNSLambdaExecute"])

    user_data = UserData()
    user_data["system"]["fqdn"] = names.vault
    user_data["system"]["type"] = "vault"
    config.add_autoscale_group("Vault",
                               names.vault,
                               aws.ami_lookup(session, "vault.boss"),
                               keypair,
                               subnets = internal_subnets_lambda,
                               security_groups = [Ref("InternalSecurityGroup")],
                               user_data = str(user_data),
                               min = const.VAULT_CLUSTER_SIZE,
                               max = const.VAULT_CLUSTER_SIZE,
                               notifications = Ref("DNSSNS"),
                               depends_on = ["Consul", "DNSLambda", "DNSSNS", "DNSLambdaExecute"])


    user_data = UserData()
    user_data["system"]["fqdn"] = names.auth
    user_data["system"]["type"] = "auth"
    deps = ["AuthSecurityGroup",
            "AttachInternetGateway",
            "DNSLambda",
            "DNSSNS",
            "DNSLambdaExecute"]

    SCENARIO = os.environ["SCENARIO"]
    USE_DB = SCENARIO in ("production", "ha-development",)
    # Problem: The development scenario uses a local DB. If the auth server crashes
    #          and is auto-restarted by the autoscale group, then the new auth server
    #          will not have any of the previous configuration, because the old DB
    #          was lost. Using an RDS for development fixes this at the cost of the
    #          core config taking longer to launch.
    if USE_DB:
        deps.append("AuthDB")
        user_data["aws"]["db"] = "keycloak" # flag for init script for which config to use

    cert = aws.cert_arn_lookup(session, names.public_dns('auth'))
    create_asg_elb(config,
                   "Auth",
                   names.auth,
                   aws.ami_lookup(session, "auth.boss"),
                   keypair,
                   str(user_data),
                   const.AUTH_CLUSTER_SIZE,
                   internal_subnets_lambda,
                   external_subnets_lambda,
                   [("443", "8080", "HTTPS", cert)],
                   "HTTP:8080/index.html",
                   sgs = [Ref("AuthSecurityGroup")],
                   type_=const.AUTH_TYPE,
                   depends_on=deps)

    if USE_DB:
        config.add_rds_db("AuthDB",
                          names.auth_db,
                          "3306",
                          "keycloak",
                          "keycloak",
                          "keycloak",
                          internal_subnets,
                          type_ = "db.t2.micro",
                          security_groups = [Ref("InternalSecurityGroup")])


    config.add_lambda("DNSLambda",
                      names.dns,
                      aws.role_arn_lookup(session, 'UpdateRoute53'),
                      const.DNS_LAMBDA,
                      handler="index.handler",
                      timeout=10,
                      depends_on="DNSZone")

    config.add_lambda_permission("DNSLambdaExecute", Ref("DNSLambda"))

    config.add_sns_topic("DNSSNS",
                         names.dns,
                         names.dns,
                         [("lambda", Arn("DNSLambda"))])


    config.add_security_group("InternalSecurityGroup",
                              names.internal,
                              [("-1", "-1", "-1", "10.0.0.0/8")])

    # Allow SSH access to the bastion from the configured incoming subnet
    config.add_security_group("BastionSecurityGroup",
                              names.ssh,
                              [("tcp", "22", "22", const.INCOMING_SUBNET)])

    config.add_security_group("AuthSecurityGroup",
                              #names.https, DP XXX: hack until we can get production updated correctly
                              names.auth,
                              [("tcp", "443", "443", "0.0.0.0/0")])

    # Create the internal route table to route traffic to the NAT Bastion
    all_internal_subnets = internal_subnets.copy()
    all_internal_subnets.append(Ref("InternalSubnet"))
    config.add_route_table("InternalRouteTable",
                           names.internal,
                           subnets = all_internal_subnets)

    config.add_route_table_route("InternalNatRoute",
                                 Ref("InternalRouteTable"),
                                 nat = Ref("NAT"),
                                 depends_on = "NAT")

    # Create the internet gateway and internet router
    all_external_subnets = external_subnets.copy()
    all_external_subnets.append(Ref("ExternalSubnet"))
    config.add_route_table("InternetRouteTable",
                           names.internet,
                           subnets = all_external_subnets)

    config.add_route_table_route("InternetRoute",
                                 Ref("InternetRouteTable"),
                                 gateway = Ref("InternetGateway"),
                                 depends_on = "AttachInternetGateway")

    config.add_internet_gateway("InternetGateway", names.internet)
    config.add_endpoint("S3Endpoint", "s3", [Ref("InternalRouteTable"), Ref('InternetRouteTable')])
    config.add_endpoint("DynamoDBEndpoint", "dynamodb", [Ref("InternalRouteTable"), Ref('InternetRouteTable')])
    config.add_nat("NAT", Ref("ExternalSubnet"), depends_on="AttachInternetGateway")

    return config
Code example #14
0
File: api.py Project: reconstrue/boss-manage
def create_config(bosslet_config, db_config={}):
    names = bosslet_config.names
    session = bosslet_config.session

    # Look up the IAM role and SNS topic ARNs that are used later in the config
    endpoint_role_arn = aws.role_arn_lookup(session, "endpoint")
    cachemanager_role_arn = aws.role_arn_lookup(session, 'cachemanager')
    dns_arn = aws.sns_topic_lookup(session, names.dns.sns)
    if dns_arn is None:
        raise MissingResourceError('SNS topic', names.dns.sns)

    mailing_list_arn = aws.sns_topic_lookup(session,
                                            bosslet_config.ALERT_TOPIC)
    if mailing_list_arn is None:
        raise MissingResourceError('SNS topic', bosslet_config.ALERT_TOPIC)

    # Configure Vault and create the user data config that the endpoint will
    # use for connecting to Vault and the DB instance
    user_data = UserData()
    user_data["system"]["fqdn"] = names.endpoint.dns
    user_data["system"]["type"] = "endpoint"
    user_data["aws"]["db"] = names.endpoint_db.rds
    user_data["aws"]["cache"] = names.cache.redis
    user_data["aws"]["cache-state"] = names.cache_state.redis
    if const.REDIS_SESSION_TYPE is not None:
        user_data["aws"]["cache-session"] = names.cache_session.redis
    else:
        # Don't create a Redis server for dev stacks.
        user_data["aws"]["cache-session"] = ''
    if const.REDIS_THROTTLE_TYPE is not None:
        user_data["aws"]["cache-throttle"] = names.cache_throttle.redis
    else:
        user_data["aws"]["cache-throttle"] = ''

    # cache-db and cache-state-db need to be in user_data for the lambda to access them.
    user_data["aws"]["cache-db"] = "0"
    user_data["aws"]["cache-state-db"] = "0"
    user_data["aws"]["cache-throttle-db"] = "0"
    user_data["aws"]["cache-session-db"] = "0"
    user_data["aws"]["meta-db"] = names.meta.ddb

    # Use CloudFormation's Ref function so that queues' URLs are placed into
    # the Boss config file.
    user_data["aws"]["s3-flush-queue"] = str(Ref(
        names.s3flush.sqs))  # str(Ref("S3FlushQueue")) DP XXX
    user_data["aws"]["s3-flush-deadletter-queue"] = str(
        Ref(names.deadletter.sqs))  #str(Ref("DeadLetterQueue")) DP XXX
    user_data["aws"]["cuboid_bucket"] = names.cuboid_bucket.s3
    user_data["aws"]["tile_bucket"] = names.tile_bucket.s3
    user_data["aws"]["ingest_bucket"] = names.ingest_bucket.s3
    user_data["aws"]["s3-index-table"] = names.s3_index.ddb
    user_data["aws"]["tile-index-table"] = names.tile_index.ddb
    user_data["aws"]["id-index-table"] = names.id_index.ddb
    user_data["aws"]["id-count-table"] = names.id_count_index.ddb
    user_data["aws"]["prod_mailing_list"] = mailing_list_arn
    user_data["aws"]["max_task_id_suffix"] = str(const.MAX_TASK_ID_SUFFIX)
    user_data["aws"]["id-index-new-chunk-threshold"] = str(
        const.DYNAMO_ID_INDEX_NEW_CHUNK_THRESHOLD)
    user_data["aws"]["index-deadletter-queue"] = str(
        Ref(names.index_deadletter.sqs))
    user_data["aws"]["index-cuboids-keys-queue"] = str(
        Ref(names.index_cuboids_keys.sqs))

    user_data["auth"]["OIDC_VERIFY_SSL"] = str(bosslet_config.VERIFY_SSL)
    user_data["lambda"]["flush_function"] = names.multi_lambda.lambda_
    user_data["lambda"]["page_in_function"] = names.multi_lambda.lambda_
    user_data["lambda"]["ingest_function"] = names.tile_ingest.lambda_
    user_data["lambda"]["downsample_volume"] = names.downsample_volume.lambda_
    user_data["lambda"]["tile_uploaded_function"] = names.tile_uploaded.lambda_

    user_data['sfn']['populate_upload_queue'] = names.ingest_queue_populate.sfn
    user_data['sfn']['upload_sfn'] = names.ingest_queue_upload.sfn
    user_data['sfn']['volumetric_upload_sfn'] = names.volumetric_ingest_queue_upload.sfn
    user_data['sfn']['downsample_sfn'] = names.resolution_hierarchy.sfn
    user_data['sfn']['index_cuboid_supervisor_sfn'] = names.index_cuboid_supervisor.sfn

    # Prepare user data for parsing by CloudFormation.
    parsed_user_data = {
        "Fn::Join": ["", user_data.format_for_cloudformation()]
    }
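    # Fn::Join with an empty separator concatenates the parts emitted by
    # format_for_cloudformation() (presumably literal strings interleaved with Ref
    # objects) at stack-creation time, which is how the queue URLs referenced above
    # end up substituted into the rendered Boss config.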

    config = CloudFormationConfiguration('api', bosslet_config, version="2")
    keypair = bosslet_config.SSH_KEY

    vpc_id = config.find_vpc()
    internal_subnets, external_subnets = config.find_all_subnets()
    az_subnets_asg, external_subnets_asg = config.find_all_subnets(
        compatibility='asg')
    sgs = aws.sg_lookup_all(session, vpc_id)

    # DP XXX: hack until we can get production updated correctly
    config.add_security_group(
        'AllHttpHttpsSecurityGroup', names.https.sg,
        [('tcp', '443', '443', bosslet_config.HTTPS_INBOUND),
         ('tcp', '80', '80', bosslet_config.HTTPS_INBOUND)])
    sgs[names.https.sg] = Ref('AllHttpHttpsSecurityGroup')

    # Create SQS queues and apply access control policies.
    # Deadletter queue for indexing operations.  This one is populated
    # manually by states in the indexing step functions.
    config.add_sqs_queue(names.index_deadletter.sqs,
                         names.index_deadletter.sqs, 30, 20160)

    # Queue that holds S3 object keys of cuboids to be indexed.
    config.add_sqs_queue(names.index_cuboids_keys.sqs,
                         names.index_cuboids_keys.sqs, 120, 20160)

    #config.add_sqs_queue("DeadLetterQueue", names.deadletter.sqs, 30, 20160) DP XXX
    config.add_sqs_queue(names.deadletter.sqs, names.deadletter.sqs, 30, 20160)

    max_receives = 3
    #config.add_sqs_queue("S3FlushQueue", DP XXX
    config.add_sqs_queue(names.s3flush.sqs,
                         names.s3flush.sqs,
                         30,
                         dead=(Arn(names.deadletter.sqs), max_receives))
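    # Standard SQS redrive behavior: once a message has been received max_receives
    # (3) times without being deleted, it is moved to the dead-letter queue above.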

    config.add_sqs_policy(
        "sqsEndpointPolicy",
        'sqsEndpointPolicy',  # DP XXX
        [Ref(names.deadletter.sqs),
         Ref(names.s3flush.sqs)],
        endpoint_role_arn)

    config.add_sqs_policy(
        "sqsCachemgrPolicy",
        'sqsCachemgrPolicy',  # DP XXX
        [Ref(names.deadletter.sqs),
         Ref(names.s3flush.sqs)],
        cachemanager_role_arn)

    # Create the endpoint ASG, ELB, and RDS instance

    cert = aws.cert_arn_lookup(session, names.public_dns("api"))
    target_group_keys = config.add_app_loadbalancer(
        "EndpointAppLoadBalancer",
        names.endpoint_elb.dns, [("443", "80", "HTTPS", cert)],
        vpc_id=vpc_id,
        subnets=external_subnets_asg,
        security_groups=[sgs[names.internal.sg], sgs[names.https.sg]],
        public=True)

    target_group_arns = [Ref(key) for key in target_group_keys]

    config.add_public_dns('EndpointAppLoadBalancer', names.public_dns('api'))
    config.add_autoscale_group("Endpoint",
                               names.endpoint.dns,
                               aws.ami_lookup(bosslet_config,
                                              names.endpoint.ami),
                               keypair,
                               subnets=az_subnets_asg,
                               type_=const.ENDPOINT_TYPE,
                               security_groups=[sgs[names.internal.sg]],
                               user_data=parsed_user_data,
                               min=const.ENDPOINT_CLUSTER_MIN,
                               max=const.ENDPOINT_CLUSTER_MAX,
                               notifications=dns_arn,
                               role=aws.instance_profile_arn_lookup(
                                   session, 'endpoint'),
                               health_check_grace_period=90,
                               detailed_monitoring=True,
                               target_group_arns=target_group_arns,
                               depends_on=["EndpointDB"])

    # Endpoint servers are not CPU bound typically, so react quickly to load
    config.add_autoscale_policy(
        "EndpointScaleUp",
        Ref("Endpoint"),
        adjustments=[
            (0.0, 10, 1),  # 12% - 22% Utilization add 1 instance
            (10, None, 2)  # Above 22% Utilization add 2 instances
        ],
        alarms=[("CPUUtilization", "Maximum", "GreaterThanThreshold", "12")],
        period=1)

    config.add_autoscale_policy(
        "EndpointScaleDown",
        Ref("Endpoint"),
        adjustments=[
            (None, 0.0, -1),  # Under 1.5% Utilization remove 1 instance
        ],
        alarms=[("CPUUtilization", "Average", "LessThanThreshold", "1.5")],
        period=50)
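    # The adjustment tuples above appear to map onto CloudFormation step-scaling
    # StepAdjustments, i.e. (metric interval lower bound, upper bound, capacity
    # delta) relative to the alarm threshold: with the 12% threshold, (0.0, 10, 1)
    # covers 12-22% CPU and adds one instance, (10, None, 2) covers everything
    # above 22% and adds two, and (None, 0.0, -1) removes one instance while CPU
    # stays below the 1.5% threshold.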

    config.add_rds_db("EndpointDB",
                      names.endpoint_db.dns,
                      db_config.get("port"),
                      db_config.get("name"),
                      db_config.get("user"),
                      db_config.get("password"),
                      internal_subnets,
                      type_=const.RDS_TYPE,
                      security_groups=[sgs[names.internal.sg]])

    # Create the Meta, s3Index, tileIndex, annotation Dynamo tables
    with open(const.DYNAMO_METADATA_SCHEMA, 'r') as fh:
        dynamo_cfg = json.load(fh)
    config.add_dynamo_table_from_json("EndpointMetaDB", names.meta.ddb,
                                      **dynamo_cfg)

    with open(const.DYNAMO_S3_INDEX_SCHEMA, 'r') as s3fh:
        dynamo_s3_cfg = json.load(s3fh)
    config.add_dynamo_table_from_json('s3Index', names.s3_index.ddb,
                                      **dynamo_s3_cfg)  # DP XXX

    with open(const.DYNAMO_TILE_INDEX_SCHEMA, 'r') as tilefh:
        dynamo_tile_cfg = json.load(tilefh)
    config.add_dynamo_table_from_json('tileIndex', names.tile_index.ddb,
                                      **dynamo_tile_cfg)  # DP XXX

    with open(const.DYNAMO_ID_INDEX_SCHEMA, 'r') as id_ind_fh:
        dynamo_id_ind_cfg = json.load(id_ind_fh)
    config.add_dynamo_table_from_json('idIndIndex', names.id_index.ddb,
                                      **dynamo_id_ind_cfg)  # DP XXX

    with open(const.DYNAMO_ID_COUNT_SCHEMA, 'r') as id_count_fh:
        dynamo_id_count_cfg = json.load(id_count_fh)
    config.add_dynamo_table_from_json('idCountIndex', names.id_count_index.ddb,
                                      **dynamo_id_count_cfg)  # DP XXX

    return config
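
For orientation, the schema files loaded above are passed through as keyword arguments to add_dynamo_table_from_json, so each JSON file presumably describes a standard DynamoDB table definition (attributes, key schema, throughput). A minimal plain-boto3 sketch of that kind of definition is shown below; the region, table name, attributes, and keys are hypothetical illustrations, not the actual Boss schemas.

import boto3

# Hypothetical sketch only: a table definition of the general shape the
# DYNAMO_*_SCHEMA JSON files above would describe.
dynamodb = boto3.client('dynamodb', region_name='us-east-1')  # region is illustrative
dynamodb.create_table(
    TableName='example-s3index',  # hypothetical name
    AttributeDefinitions=[
        {'AttributeName': 'object-key', 'AttributeType': 'S'},
        {'AttributeName': 'version-node', 'AttributeType': 'N'},
    ],
    KeySchema=[
        {'AttributeName': 'object-key', 'KeyType': 'HASH'},
        {'AttributeName': 'version-node', 'KeyType': 'RANGE'},
    ],
    ProvisionedThroughput={'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5},
)
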
Code example #15
0
def create_config(bosslet_config):
    """Create the CloudFormationConfiguration object."""
    config = CloudFormationConfiguration('core', bosslet_config, version="2")
    session = bosslet_config.session
    keypair = bosslet_config.SSH_KEY
    names = bosslet_config.names

    config.add_vpc()

    # Create the internal and external subnets
    config.add_subnet('InternalSubnet', names.internal.subnet)
    config.add_subnet('ExternalSubnet', names.external.subnet)
    internal_subnets, external_subnets = config.add_all_subnets()
    internal_subnets_asg, external_subnets_asg = config.find_all_subnets('asg')

    # Create a custom resource to help delete ENIs from lambdas
    # DP NOTE: After deleting a lambda the ENIs may stick around for while, causing the stack delete to fail
    #          See https://stackoverflow.com/a/41310289
    config.add_arg(Arg.String('StackName', config.stack_name))
    config.add_custom_resource('DeleteENI',
                               'DeleteENI',
                               Arn('DeleteENILambda'),
                               StackName=Ref('StackName'))
    config.add_lambda(
        "DeleteENILambda",
        names.delete_eni.lambda_,
        aws.role_arn_lookup(session, 'DeleteENI'),
        const.DELETE_ENI_LAMBDA,
        handler="index.handler",
        timeout=180,  # 3 minutes, so that there is enough time to wait for the ENI detach to complete
        runtime='python3.6'
    )  # If the lambda times out CF will retry a couple of times
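    # The DeleteENI custom resource above invokes this lambda on stack
    # create/update/delete events, giving it a chance to detach and remove
    # leftover ENIs before the teardown tries to delete the subnets and security
    # groups they are attached to.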

    user_data = const.BASTION_USER_DATA.format(bosslet_config.NETWORK)
    config.add_ec2_instance("Bastion",
                            names.bastion.dns,
                            aws.ami_lookup(bosslet_config, const.BASTION_AMI),
                            keypair,
                            subnet=Ref("ExternalSubnet"),
                            public_ip=True,
                            user_data=user_data,
                            security_groups=[
                                Ref("InternalSecurityGroup"),
                                Ref("BastionSecurityGroup")
                            ],
                            depends_on="AttachInternetGateway")

    vault_role = aws.role_arn_lookup(session, 'apl-vault')
    vault_actions = ['kms:Encrypt', 'kms:Decrypt', 'kms:DescribeKey']
    config.add_kms_key("VaultKey", names.vault.key, vault_role, vault_actions)

    config.add_dynamo_table("VaultTable",
                            names.vault.ddb,
                            attributes=[('Path', 'S'), ('Key', 'S')],
                            key_schema=[('Path', 'HASH'), ('Key', 'RANGE')],
                            throughput=(5, 5))
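    # The (Path, Key) hash/range schema matches the table layout used by Vault's
    # DynamoDB storage backend; (5, 5) is presumably the provisioned read/write
    # capacity units.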

    user_data = UserData()
    user_data["system"]["fqdn"] = names.vault.dns
    user_data["system"]["type"] = "vault"
    user_data["vault"]["kms_key"] = str(Ref("VaultKey"))
    user_data["vault"]["ddb_table"] = names.vault.ddb
    parsed_user_data = {
        "Fn::Join": ["", user_data.format_for_cloudformation()]
    }
    config.add_autoscale_group("Vault",
                               names.vault.dns,
                               aws.ami_lookup(bosslet_config, names.vault.ami),
                               keypair,
                               subnets=internal_subnets_asg,
                               type_=const.VAULT_TYPE,
                               security_groups=[Ref("InternalSecurityGroup")],
                               user_data=parsed_user_data,
                               min=const.VAULT_CLUSTER_SIZE,
                               max=const.VAULT_CLUSTER_SIZE,
                               notifications=Ref("DNSSNS"),
                               role=aws.instance_profile_arn_lookup(
                                   session, 'apl-vault'),
                               depends_on=[
                                   "VaultKey", "VaultTable", "DNSLambda",
                                   "DNSSNS", "DNSLambdaExecute"
                               ])

    user_data = UserData()
    user_data["system"]["fqdn"] = names.auth.dns
    user_data["system"]["type"] = "auth"
    deps = [
        "AuthSecurityGroup", "AttachInternetGateway", "DNSLambda", "DNSSNS",
        "DNSLambdaExecute"
    ]

    # Problem: The development scenario uses a local DB. If the auth server crashes
    #          and is auto-restarted by the autoscale group, then the new auth server
    #          will not have any of the previous configuration, because the old DB
    #          was lost. Using an RDS for development fixes this at the cost of the
    #          core config taking longer to launch.
    USE_DB = bosslet_config.AUTH_RDS
    if USE_DB:
        deps.append("AuthDB")
        user_data["aws"][
            "db"] = "keycloak"  # flag for init script for which config to use

    cert = aws.cert_arn_lookup(session, names.public_dns('auth'))
    create_asg_elb(config,
                   "Auth",
                   names.auth.dns,
                   aws.ami_lookup(bosslet_config, names.auth.ami),
                   keypair,
                   str(user_data),
                   const.AUTH_CLUSTER_SIZE,
                   internal_subnets_asg,
                   external_subnets_asg, [("443", "8080", "HTTPS", cert)],
                   "HTTP:8080/index.html",
                   sgs=[Ref("AuthSecurityGroup")],
                   type_=const.AUTH_TYPE,
                   depends_on=deps)
    config.add_public_dns('AuthLoadBalancer', names.public_dns('auth'))

    if USE_DB:
        config.add_rds_db("AuthDB",
                          names.auth_db.rds,
                          "3306",
                          "keycloak",
                          "keycloak",
                          "keycloak",
                          internal_subnets,
                          type_="db.t2.micro",
                          security_groups=[Ref("InternalSecurityGroup")])

    config.add_lambda("DNSLambda",
                      names.dns.lambda_,
                      aws.role_arn_lookup(session, 'UpdateRoute53'),
                      const.DNS_LAMBDA,
                      handler="index.handler",
                      timeout=10,
                      depends_on="DNSZone")

    config.add_lambda_permission("DNSLambdaExecute", Ref("DNSLambda"))

    config.add_sns_topic("DNSSNS", names.dns.sns, names.dns.sns,
                         [("lambda", Arn("DNSLambda"))])

    config.add_security_group("InternalSecurityGroup", names.internal.sg,
                              [("-1", "-1", "-1", bosslet_config.NETWORK)])

    # Allow SSH access to the bastion from the configured SSH inbound subnet
    incoming_subnet = bosslet_config.SSH_INBOUND
    config.add_security_group("BastionSecurityGroup", names.ssh.sg,
                              [("tcp", "22", "22", incoming_subnet)])

    incoming_subnet = bosslet_config.HTTPS_INBOUND
    boss_subnet = {
        "Fn::Join": ["/", [Ref("NATIP"), "32"]]
    }  # Allow requests from the endpoint via the NAT gateway
    # Needed in case HTTPS_INBOUND doesn't include the gateway's IP
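    # The Fn::Join above should resolve to "<NAT public IP>/32" once the NATIP
    # resource exists, i.e. a single-host CIDR for the NAT gateway's Elastic IP.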
    config.add_security_group(
        "AuthSecurityGroup",
        #names.https.sg, DP XXX: hack until we can get production updated correctly
        names.auth.sg,
        [("tcp", "443", "443", incoming_subnet),
         ("tcp", "443", "443", boss_subnet)])

    # Create the internal route table to route traffic to the NAT Bastion
    all_internal_subnets = internal_subnets.copy()
    all_internal_subnets.append(Ref("InternalSubnet"))
    config.add_route_table("InternalRouteTable",
                           names.internal.rt,
                           subnets=all_internal_subnets)

    config.add_route_table_route("InternalNatRoute",
                                 Ref("InternalRouteTable"),
                                 nat=Ref("NAT"),
                                 depends_on="NAT")

    # Create the internet gateway and internet router
    all_external_subnets = external_subnets.copy()
    all_external_subnets.append(Ref("ExternalSubnet"))
    config.add_route_table("InternetRouteTable",
                           names.internet.rt,
                           subnets=all_external_subnets)

    config.add_route_table_route("InternetRoute",
                                 Ref("InternetRouteTable"),
                                 gateway=Ref("InternetGateway"),
                                 depends_on="AttachInternetGateway")

    config.add_internet_gateway("InternetGateway", names.internet.gw)
    config.add_endpoint("S3Endpoint", "s3",
                        [Ref("InternalRouteTable"),
                         Ref('InternetRouteTable')])
    config.add_endpoint("DynamoDBEndpoint", "dynamodb",
                        [Ref("InternalRouteTable"),
                         Ref('InternetRouteTable')])
    config.add_nat("NAT",
                   Ref("ExternalSubnet"),
                   depends_on="AttachInternetGateway")

    return config
Code example #16
0
File: backup.py Project: reconstrue/boss-manage
def create_config(bosslet_config):
    """Create the CloudFormationConfiguration object."""
    config = CloudFormationConfiguration('backup', bosslet_config)
    names = bosslet_config.names

    # DP NOTE: During implementation there was/is an availability zone that
    #          could not run the T2.Micro instances used by this Data Pipeline
    azs = aws.azs_lookup(bosslet_config, 'datapipeline')
    az = random.choice(azs)[1] + '-'
    internal_subnet = aws.subnet_id_lookup(
        bosslet_config.session, az + bosslet_config.names.internal.subnet)
    backup_image = aws.ami_lookup(bosslet_config, names.backup.ami)[0]

    s3_backup = "s3://" + names.backup.s3 + "/#{format(@scheduledStartTime, 'YYYY-ww')}"
    s3_logs = "s3://" + names.backup.s3 + "/logs"

    # DP TODO: Create all BOSS S3 buckets as part of the account setup
    #          as the CloudFormation delete doesn't delete the bucket,
    #          making this a conditional add
    BUCKET_DEPENDENCY = None  # Needed as the pipelines try to execute when launched
    if not aws.s3_bucket_exists(bosslet_config.session, names.backup.s3):
        life_cycle = {
            'Rules': [{
                'Id': 'Delete Data',
                'Status': 'Enabled',
                'ExpirationInDays': 180,  # ~6 Months
            }]
        }
        encryption = {
            'ServerSideEncryptionConfiguration': [{
                'ServerSideEncryptionByDefault': {
                    'SSEAlgorithm': 'AES256'
                }
            }]
        }
        config.add_s3_bucket("BackupBucket",
                             names.backup.s3,
                             life_cycle_config=life_cycle,
                             encryption=encryption)
        BUCKET_DEPENDENCY = "BackupBucket"

    # Vault Backup
    cmd = "/usr/local/bin/python3 ~/vault.py backup {}".format(
        bosslet_config.INTERNAL_DOMAIN)
    pipeline = DataPipeline(log_uri=s3_logs, resource_role="backup")
    pipeline.add_shell_command("VaultBackup",
                               cmd,
                               destination=Ref("VaultBucket"),
                               runs_on=Ref("VaultInstance"))
    pipeline.add_s3_bucket("VaultBucket", s3_backup + "/vault")
    pipeline.add_ec2_instance("VaultInstance",
                              subnet=internal_subnet,
                              image=backup_image)
    config.add_data_pipeline("VaultBackupPipeline",
                             "vault-backup." + bosslet_config.INTERNAL_DOMAIN,
                             pipeline.objects,
                             depends_on=BUCKET_DEPENDENCY)

    # DynamoDB Backup
    tables = {
        "BossMeta": names.meta.ddb,
        "S3Index": names.s3_index.ddb,
        "TileIndex": names.tile_index.ddb,
        "IdIndex": names.id_index.ddb,
        "IdCountIndex": names.id_count_index.ddb,
        "VaultData": names.vault.ddb,
    }

    pipeline = DataPipeline(log_uri=s3_logs)
    pipeline.add_emr_cluster("BackupCluster", region=bosslet_config.REGION)

    for name in tables:
        table = tables[name]
        pipeline.add_s3_bucket(name + "Bucket", s3_backup + "/DDB/" + table)
        pipeline.add_ddb_table(name, table)
        pipeline.add_emr_copy(name + "Copy",
                              Ref(name),
                              Ref(name + "Bucket"),
                              runs_on=Ref("BackupCluster"),
                              region=bosslet_config.REGION)

    config.add_data_pipeline("DDBPipeline",
                             "dynamo-backup." + bosslet_config.INTERNAL_DOMAIN,
                             pipeline.objects,
                             depends_on=BUCKET_DEPENDENCY)

    # Endpoint RDS Backup
    pipeline = rds_copy(names.endpoint_db.rds, internal_subnet, backup_image,
                        s3_logs, s3_backup)
    config.add_data_pipeline("EndpointPipeline",
                             "endpoint-backup." +
                             bosslet_config.INTERNAL_DOMAIN,
                             pipeline.objects,
                             depends_on=BUCKET_DEPENDENCY)

    # Auth RDS Backup
    if bosslet_config.AUTH_RDS:
        pipeline = rds_copy(names.auth_db.rds, internal_subnet, backup_image,
                            s3_logs, s3_backup)
        config.add_data_pipeline("AuthPipeline",
                                 "auth-backup." +
                                 bosslet_config.INTERNAL_DOMAIN,
                                 pipeline.objects,
                                 depends_on=BUCKET_DEPENDENCY)

    return config
Code example #17
0
File: activities.py Project: lrodri29/boss-manage
def create_config(session, domain):
    """Create the CloudFormationConfiguration object."""
    config = CloudFormationConfiguration('activities', domain, const.REGION)
    names = AWSNames(domain)

    global keypair
    keypair = aws.keypair_lookup(session)

    vpc_id = config.find_vpc(session)
    sgs = aws.sg_lookup_all(session, vpc_id)
    internal_subnets, _ = config.find_all_availability_zones(session)
    internal_subnets_lambda, _ = config.find_all_availability_zones(
        session, lambda_compatible_only=True)
    topic_arn = aws.sns_topic_lookup(session, "ProductionMicronsMailingList")
    event_data = {
        "lambda-name": "delete_lambda",
        "db": names.endpoint_db,
        "meta-db": names.meta,
        "s3-index-table": names.s3_index,
        "id-index-table": names.id_index,
        "id-count-table": names.id_count_index,
        "cuboid_bucket": names.cuboid_bucket,
        "delete_bucket": names.delete_bucket,
        "topic-arn": topic_arn,
        "query-deletes-sfn-name": names.query_deletes,
        "delete-sfn-name": names.delete_cuboid,
        "delete-exp-sfn-name": names.delete_experiment,
        "delete-coord-frame-sfn-name": names.delete_coord_frame,
        "delete-coll-sfn-name": names.delete_collection
    }

    role_arn = aws.role_arn_lookup(session, "events_for_delete_lambda")
    multi_lambda = names.multi_lambda
    lambda_arn = aws.lambda_arn_lookup(session, multi_lambda)
    target_list = [{
        "Arn": lambda_arn,
        "Id": multi_lambda,
        "Input": json.dumps(event_data)
    }]
    schedule_expression = "cron(1 6-11/1 ? * TUE-FRI *)"
    #schedule_expression = "cron(0/2 * * * ? *)"  # testing fire every two minutes

    config.add_event_rule("DeleteEventRule",
                          names.delete_event_rule,
                          role_arn=role_arn,
                          schedule_expression=schedule_expression,
                          target_list=target_list,
                          description=None)
    # Events have to be given permission to invoke the lambda.
    config.add_lambda_permission('DeleteRulePerm',
                                 multi_lambda,
                                 principal='events.amazonaws.com',
                                 source=Arn('DeleteEventRule'))
    user_data = UserData()
    user_data["system"]["fqdn"] = names.activities
    user_data["system"]["type"] = "activities"
    user_data["aws"]["db"] = names.endpoint_db
    user_data["aws"]["cache"] = names.cache
    user_data["aws"]["cache-state"] = names.cache_state
    user_data["aws"]["cache-db"] = "0"
    user_data["aws"]["cache-state-db"] = "0"
    user_data["aws"]["meta-db"] = names.meta
    user_data["aws"]["cuboid_bucket"] = names.cuboid_bucket
    user_data["aws"]["tile_bucket"] = names.tile_bucket
    user_data["aws"]["ingest_bucket"] = names.ingest_bucket
    user_data["aws"]["s3-index-table"] = names.s3_index
    user_data["aws"]["tile-index-table"] = names.tile_index
    user_data["aws"]["id-index-table"] = names.id_index
    user_data["aws"]["id-count-table"] = names.id_count_index

    config.add_autoscale_group("Activities",
                               names.activities,
                               aws.ami_lookup(session, 'activities.boss'),
                               keypair,
                               subnets=internal_subnets_lambda,
                               type_=const.ACTIVITIES_TYPE,
                               security_groups=[sgs[names.internal]],
                               user_data=str(user_data),
                               role=aws.instance_profile_arn_lookup(
                                   session, "activities"),
                               min=1,
                               max=1)

    config.add_lambda("IngestLambda",
                      names.ingest_lambda,
                      aws.role_arn_lookup(session, 'IngestQueueUpload'),
                      const.INGEST_LAMBDA,
                      handler="index.handler",
                      timeout=60 * 5)

    config.add_lambda_permission("IngestLambdaExecute", Ref("IngestLambda"))

    return config
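
The add_event_rule / add_lambda_permission pair above generates the CloudFormation resources for scheduled-event-to-Lambda wiring. As a rough plain-boto3 sketch of the same end result, under assumed and clearly hypothetical names and ARNs:

import json
import boto3

events = boto3.client('events')
lambda_client = boto3.client('lambda')

# Create (or update) the scheduled rule; the name is a placeholder.
rule_arn = events.put_rule(
    Name='deleteEventRule.example',
    ScheduleExpression='cron(1 6-11/1 ? * TUE-FRI *)',
    State='ENABLED',
)['RuleArn']

# Point the rule at the lambda, passing the event payload as static input.
events.put_targets(
    Rule='deleteEventRule.example',
    Targets=[{
        'Id': 'multiLambda',
        'Arn': 'arn:aws:lambda:region:account:function:multiLambda',  # placeholder ARN
        'Input': json.dumps({'lambda-name': 'delete_lambda'}),
    }],
)

# Allow CloudWatch Events to invoke the function (mirrors DeleteRulePerm above).
lambda_client.add_permission(
    FunctionName='multiLambda',  # placeholder function name
    StatementId='DeleteRulePerm',
    Action='lambda:InvokeFunction',
    Principal='events.amazonaws.com',
    SourceArn=rule_arn,
)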