def create_config(session, domain, keypair=None, user_data=None):
    """
    Create the CloudFormationConfiguration object.

    Args:
        session: amazon session object
        domain: domain of the stack being created
        keypair: keypair used by instances being created
        user_data (UserData): information used by the endpoint instance and
            vault.  Data will be run through the CloudFormation Fn::Join
            template intrinsic function so other template intrinsic functions
            used in the user_data will be parsed and executed.

    Returns:
        the config for the CloudFormation stack
    """

    # Prepare user data for parsing by CloudFormation.
    if user_data is not None:
        parsed_user_data = { "Fn::Join" : ["", user_data.format_for_cloudformation()]}
    else:
        parsed_user_data = user_data

    names = AWSNames(domain)
    config = CloudFormationConfiguration("cachedb", domain, const.REGION)

    vpc_id = config.find_vpc(session)

    # Create several subnets for all the lambdas to use.
    lambda_azs = aws.azs_lookup(session, lambda_compatible_only=True)
    internal_route_table_id = aws.rt_lookup(session, vpc_id, names.internal)

    print("AZs for lambda: " + str(lambda_azs))
    lambda_subnets = []
    for i in range(const.LAMBDA_SUBNETS):
        key = 'LambdaSubnet{}'.format(i)
        lambda_subnets.append(Ref(key))
        config.add_subnet(key, names.subnet('lambda{}'.format(i)),
                          az=lambda_azs[i % len(lambda_azs)][0])
        config.add_route_table_association(key + "RTA",
                                           internal_route_table_id,
                                           Ref(key))

    # Lookup the Internal Subnet and Internal Security Group IDs that are
    # needed by other resources
    internal_subnet_id = aws.subnet_id_lookup(session, names.subnet("internal"))
    config.add_arg(Arg.Subnet("InternalSubnet",
                              internal_subnet_id,
                              "ID of Internal Subnet to create resources in"))

    internal_sg_id = aws.sg_lookup(session, vpc_id, names.internal)
    config.add_arg(Arg.SecurityGroup("InternalSecurityGroup",
                                     internal_sg_id,
                                     "ID of internal Security Group"))

    role = aws.role_arn_lookup(session, "lambda_cache_execution")
    config.add_arg(Arg.String("LambdaCacheExecutionRole", role,
                              "IAM role for multilambda." + domain))

    cuboid_import_role = aws.role_arn_lookup(session, CUBOID_IMPORT_ROLE)
    config.add_arg(Arg.String(CUBOID_IMPORT_ROLE, cuboid_import_role,
                              "IAM role for cuboidImport." + domain))

    config.add_capabilities(['CAPABILITY_IAM'])

    # Allow updating S3 index table with cuboid's object key during
    # volumetric ingest.
    # Example of s3_index_arn form: arn:aws:dynamodb:us-east-1:12345678:table/s3index.*.boss
    # (the stack's domain in the table ARN is replaced with a wildcard plus the
    # top-level domain, so one policy covers every s3index table under it)
    config.add_iam_policy_to_role(
        'S3IndexPutItem{}'.format(domain).replace('.', ''),
        get_s3_index_arn(session, domain).replace(domain,'*.') + domain.split('.')[1],
        [CUBOID_IMPORT_ROLE], ['dynamodb:PutItem'])

    cuboid_bucket_name = names.cuboid_bucket
    if not aws.s3_bucket_exists(session, cuboid_bucket_name):
        config.add_s3_bucket("cuboidBucket", cuboid_bucket_name)

    config.add_s3_bucket_policy(
        "cuboidBucketPolicy", cuboid_bucket_name,
        ['s3:GetObject', 's3:PutObject'],
        { 'AWS': role})
    config.append_s3_bucket_policy(
        "cuboidBucketPolicy", cuboid_bucket_name,
        ['s3:PutObject'], { 'AWS': cuboid_import_role})

    delete_bucket_name = names.delete_bucket
    if not aws.s3_bucket_exists(session, delete_bucket_name):
        config.add_s3_bucket("deleteBucket", delete_bucket_name)
    config.add_s3_bucket_policy(
        "deleteBucketPolicy", delete_bucket_name,
        ['s3:GetObject', 's3:PutObject'],
        { 'AWS': role})

    tile_bucket_name = names.tile_bucket
    if not aws.s3_bucket_exists(session, tile_bucket_name):
        life_cycle_cfg = get_cf_bucket_life_cycle_rules()
        config.add_s3_bucket(
            "tileBucket", tile_bucket_name, life_cycle_config=life_cycle_cfg)

    config.add_s3_bucket_policy(
        "tileBucketPolicy", tile_bucket_name,
        ['s3:GetObject', 's3:PutObject'],
        { 'AWS': role})

    # The ingest bucket is a staging area for cuboids uploaded during volumetric ingest.
    creating_ingest_bucket = False
    ingest_bucket_name = names.ingest_bucket
    if not aws.s3_bucket_exists(session, ingest_bucket_name):
        creating_ingest_bucket = True
        ing_bucket_life_cycle_cfg = get_cf_bucket_life_cycle_rules()
        config.add_s3_bucket("ingestBucket", ingest_bucket_name,
                             life_cycle_config=ing_bucket_life_cycle_cfg)

    config.add_s3_bucket_policy(
        "ingestBucketPolicy", ingest_bucket_name,
        ['s3:GetObject', 's3:PutObject', 's3:PutObjectTagging'],
        { 'AWS': cuboid_import_role})

    config.add_ec2_instance("CacheManager",
                            names.cache_manager,
                            aws.ami_lookup(session, "cachemanager.boss"),
                            keypair,
                            subnet=Ref("InternalSubnet"),
                            public_ip=False,
                            type_=const.CACHE_MANAGER_TYPE,
                            security_groups=[Ref("InternalSecurityGroup")],
                            user_data=parsed_user_data,
                            role="cachemanager")

    config.add_sqs_queue(
        names.ingest_cleanup_dlq, names.ingest_cleanup_dlq, 30, 20160)
    config.add_sqs_queue(
        names.cuboid_import_dlq, names.cuboid_import_dlq, 30, 20160)

    config.add_sqs_policy('cuboidImportDlqPolicy', 'cuboidImportDlqPolicy',
                          [Ref(names.cuboid_import_dlq)], cuboid_import_role)

    lambda_bucket = aws.get_lambda_s3_bucket(session)
    config.add_lambda("MultiLambda",
                      names.multi_lambda,
                      Ref("LambdaCacheExecutionRole"),
                      s3=(lambda_bucket,
                          "multilambda.{}.zip".format(domain),
                          "lambda_loader.handler"),
                      timeout=120,
                      memory=1536,
                      security_groups=[Ref('InternalSecurityGroup')],
                      subnets=lambda_subnets,
                      runtime='python3.6')
    config.add_lambda("TileUploadedLambda",
                      names.tile_uploaded_lambda,
                      Ref("LambdaCacheExecutionRole"),
                      s3=(lambda_bucket,
                          "multilambda.{}.zip".format(domain),
                          "tile_uploaded_lambda.handler"),
                      timeout=5,
                      memory=1024,
                      runtime='python3.6')
    config.add_lambda("TileIngestLambda",
                      names.tile_ingest_lambda,
                      Ref("LambdaCacheExecutionRole"),
                      s3=(lambda_bucket,
                          "multilambda.{}.zip".format(domain),
                          "tile_ingest_lambda.handler"),
                      timeout=30,
                      memory=1536,
                      runtime='python3.6')
    config.add_lambda("DeleteTileObjsLambda",
                      names.delete_tile_objs_lambda,
                      Ref("LambdaCacheExecutionRole"),
                      s3=(lambda_bucket,
                          "multilambda.{}.zip".format(domain),
                          "delete_tile_objs_lambda.handler"),
                      timeout=90,
                      memory=128,
                      runtime='python3.6',
                      dlq=Arn(names.ingest_cleanup_dlq))
config.add_lambda("DeleteTileEntryLambda", names.delete_tile_index_entry_lambda, Ref("LambdaCacheExecutionRole"), s3=(lambda_bucket, "multilambda.{}.zip".format(domain), "delete_tile_index_entry_lambda.handler"), timeout=90, memory=128, runtime='python3.6', dlq=Arn(names.ingest_cleanup_dlq)) config.add_lambda("CuboidImportLambda", names.cuboid_import_lambda, Ref(CUBOID_IMPORT_ROLE), s3=(lambda_bucket, "multilambda.{}.zip".format(domain), "cuboid_import_lambda.handler"), timeout=90, memory=128, runtime='python3.6', dlq=Arn(names.cuboid_import_dlq)) config.add_lambda("VolumetricIngestLambda", names.volumetric_ingest_queue_upload_lambda, Ref("LambdaCacheExecutionRole"), s3=(lambda_bucket, "multilambda.{}.zip".format(domain), "ingest_queue_upload_volumetric_lambda.handler"), timeout=120, memory=1024, runtime='python3.6') if creating_ingest_bucket: config.add_lambda_permission( 'ingestBucketInvokeCuboidImportLambda', names.cuboid_import_lambda, principal='s3.amazonaws.com', source={ 'Fn::Join': [':', ['arn', 'aws', 's3', '', '', ingest_bucket_name]]}, #DP TODO: move into constants depends_on=['ingestBucket', 'CuboidImportLambda'] ) else: # NOTE: this permission doesn't seem to apply properly when doing a # CloudFormation update. During testing, I had to manually apply this # permission before the bucket trigger could be applied in post_init(). # Doing a CloudFormation delete followed by a create did not have a # problem. config.add_lambda_permission( 'ingestBucketInvokeCuboidImportLambda', names.cuboid_import_lambda, principal='s3.amazonaws.com', source={ 'Fn::Join': [':', ['arn', 'aws', 's3', '', '', ingest_bucket_name]]}, depends_on='CuboidImportLambda' ) # Add topic to indicating that the object store has been write locked. # Now using "production mailing list" instead of separate write lock topic. #config.add_sns_topic('WriteLock', # names.write_lock_topic, # names.write_lock, # []) # TODO: add subscribers return config
def create_config(bosslet_config, user_data=None):
    """
    Create the CloudFormationConfiguration object.

    Args:
        bosslet_config (BossConfiguration): target bosslet
        user_data (UserData): information used by the endpoint instance and
            vault.  Data will be run through the CloudFormation Fn::Join
            template intrinsic function so other template intrinsic functions
            used in the user_data will be parsed and executed.

    Returns:
        the config for the CloudFormation stack
    """

    # Prepare user data for parsing by CloudFormation.
    if user_data is not None:
        parsed_user_data = { "Fn::Join" : ["", user_data.format_for_cloudformation()]}
    else:
        parsed_user_data = user_data

    keypair = bosslet_config.SSH_KEY
    session = bosslet_config.session
    names = bosslet_config.names

    config = CloudFormationConfiguration("cachedb", bosslet_config)

    vpc_id = config.find_vpc()

    #####
    # TODO: When CF config files are refactored for multi-account support
    #       the creation of _all_ subnets should be moved into core.
    #       AWS doesn't charge for the VPC or subnets, so it doesn't
    #       increase cost and cleans up subnet creation

    # Create several subnets for all the lambdas to use.
    internal_route_table_id = aws.rt_lookup(session, vpc_id, names.internal.rt)

    lambda_subnets = config.add_all_lambda_subnets()
    for lambda_subnet in lambda_subnets:
        key = lambda_subnet['Ref']
        config.add_route_table_association(key + "RTA",
                                           internal_route_table_id,
                                           lambda_subnet)

    # Create a custom resource to help delete ENIs from lambdas
    # DP NOTE: After deleting a lambda the ENIs may stick around for a while,
    #          causing the stack delete to fail
    #          See https://stackoverflow.com/a/41310289
    config.add_arg(Arg.String('StackName', config.stack_name))
    config.add_arg(Arg.String('DeleteENILambda',
                              aws.lambda_arn_lookup(session, names.delete_eni.lambda_)))
    config.add_custom_resource('DeleteENI', 'DeleteENI', Ref('DeleteENILambda'),
                               StackName = Ref('StackName'))

    # Lookup the Internal Subnet and Internal Security Group IDs that are
    # needed by other resources
    internal_subnet_id = aws.subnet_id_lookup(session, names.internal.subnet)
    config.add_arg(Arg.Subnet("InternalSubnet",
                              internal_subnet_id,
                              "ID of Internal Subnet to create resources in"))

    internal_sg_id = aws.sg_lookup(session, vpc_id, names.internal.sg)
    config.add_arg(Arg.SecurityGroup("InternalSecurityGroup",
                                     internal_sg_id,
                                     "ID of internal Security Group"))

    role = aws.role_arn_lookup(session, "lambda_cache_execution")
    config.add_arg(Arg.String("LambdaCacheExecutionRole", role,
                              "IAM role for " + names.multi_lambda.lambda_))

    cuboid_import_role = aws.role_arn_lookup(session, CUBOID_IMPORT_ROLE)
    config.add_arg(Arg.String(CUBOID_IMPORT_ROLE, cuboid_import_role,
                              "IAM role for cuboidImport"))

    config.add_capabilities(['CAPABILITY_IAM'])

    cuboid_bucket_name = names.cuboid_bucket.s3
    if not aws.s3_bucket_exists(session, cuboid_bucket_name):
        config.add_s3_bucket("cuboidBucket", cuboid_bucket_name)

    config.add_s3_bucket_policy(
        "cuboidBucketPolicy", cuboid_bucket_name,
        ['s3:GetObject', 's3:PutObject'],
        { 'AWS': role})
    config.append_s3_bucket_policy(
        "cuboidBucketPolicy", cuboid_bucket_name,
        ['s3:PutObject'], { 'AWS': cuboid_import_role})

    delete_bucket_name = names.delete_bucket.s3
    if not aws.s3_bucket_exists(session, delete_bucket_name):
        config.add_s3_bucket("deleteBucket", delete_bucket_name)
    config.add_s3_bucket_policy(
        "deleteBucketPolicy", delete_bucket_name,
        ['s3:GetObject', 's3:PutObject'],
        { 'AWS': role})

    tile_bucket_name = names.tile_bucket.s3
    if not aws.s3_bucket_exists(session, tile_bucket_name):
        life_cycle_cfg = get_cf_bucket_life_cycle_rules()
        config.add_s3_bucket(
            "tileBucket", tile_bucket_name, life_cycle_config=life_cycle_cfg)

    config.add_s3_bucket_policy(
        "tileBucketPolicy", tile_bucket_name,
        ['s3:GetObject', 's3:PutObject'],
        { 'AWS': role})

    # The ingest bucket is a staging area for cuboids uploaded during volumetric ingest.
    creating_ingest_bucket = False
    ingest_bucket_name = names.ingest_bucket.s3
    if not aws.s3_bucket_exists(session, ingest_bucket_name):
        creating_ingest_bucket = True
        ing_bucket_life_cycle_cfg = get_cf_bucket_life_cycle_rules()
        config.add_s3_bucket("ingestBucket", ingest_bucket_name,
                             life_cycle_config=ing_bucket_life_cycle_cfg)

    config.add_s3_bucket_policy(
        "ingestBucketPolicy", ingest_bucket_name,
        ['s3:GetObject', 's3:PutObject', 's3:PutObjectTagging'],
        { 'AWS': cuboid_import_role})

    config.add_ec2_instance("CacheManager",
                            names.cachemanager.dns,
                            aws.ami_lookup(bosslet_config, names.cachemanager.ami),
                            keypair,
                            subnet=Ref("InternalSubnet"),
                            public_ip=False,
                            type_=const.CACHE_MANAGER_TYPE,
                            security_groups=[Ref("InternalSecurityGroup")],
                            user_data=parsed_user_data,
                            role="cachemanager")

    config.add_sqs_queue(
        names.ingest_cleanup_dlq.sqs, names.ingest_cleanup_dlq.sqs, 30, 20160)
    config.add_sqs_queue(
        names.cuboid_import_dlq.sqs, names.cuboid_import_dlq.sqs, 30, 20160)

    config.add_sqs_policy('cuboidImportDlqPolicy', 'cuboidImportDlqPolicy',
                          [Ref(names.cuboid_import_dlq.sqs)], cuboid_import_role)

    config.add_lambda("MultiLambda",
                      names.multi_lambda.lambda_,
                      Ref("LambdaCacheExecutionRole"),
                      handler='lambda_loader.handler',
                      timeout=120,
                      memory=1536,
                      security_groups=[Ref('InternalSecurityGroup')],
                      subnets=lambda_subnets)
    config.add_lambda("TileUploadedLambda",
                      names.tile_uploaded.lambda_,
                      Ref("LambdaCacheExecutionRole"),
                      handler='tile_uploaded_lambda.handler',
                      timeout=5,
                      memory=1024)
    config.add_lambda("TileIngestLambda",
                      names.tile_ingest.lambda_,
                      Ref("LambdaCacheExecutionRole"),
                      handler='tile_ingest_lambda.handler',
                      timeout=30,
                      memory=1536)
    config.add_lambda("DeleteTileObjsLambda",
                      names.delete_tile_objs.lambda_,
                      Ref("LambdaCacheExecutionRole"),
                      handler='delete_tile_objs_lambda.handler',
                      timeout=90,
                      memory=128,
                      dlq=Arn(names.ingest_cleanup_dlq.sqs))
    config.add_lambda("DeleteTileEntryLambda",
                      names.delete_tile_index_entry.lambda_,
                      Ref("LambdaCacheExecutionRole"),
                      handler='delete_tile_index_entry_lambda.handler',
                      timeout=90,
                      memory=128,
                      dlq=Arn(names.ingest_cleanup_dlq.sqs))
    config.add_lambda("CuboidImportLambda",
                      names.cuboid_import_lambda.lambda_,
                      Ref(CUBOID_IMPORT_ROLE),
                      handler='cuboid_import_lambda.handler',
                      timeout=90,
                      memory=128,
                      dlq=Arn(names.cuboid_import_dlq.sqs))
    config.add_lambda("VolumetricIngestLambda",
                      names.volumetric_ingest_queue_upload_lambda.lambda_,
                      Ref("LambdaCacheExecutionRole"),
                      handler='ingest_queue_upload_volumetric_lambda.handler',
                      timeout=120,
                      memory=1024)

    if creating_ingest_bucket:
        config.add_lambda_permission(
            'ingestBucketInvokeCuboidImportLambda',
            names.cuboid_import_lambda.lambda_,
            principal='s3.amazonaws.com',
            source={
                'Fn::Join': [':', ['arn', 'aws', 's3', '', '', ingest_bucket_name]]}, #DP TODO: move into constants
            depends_on=['ingestBucket', 'CuboidImportLambda']
        )
    else:
        # NOTE: this permission doesn't seem to apply properly when doing a
        # CloudFormation update.  During testing, I had to manually apply this
        # permission before the bucket trigger could be applied in post_init().
        # Doing a CloudFormation delete followed by a create did not have a
        # problem.
        config.add_lambda_permission(
            'ingestBucketInvokeCuboidImportLambda',
            names.cuboid_import_lambda.lambda_,
            principal='s3.amazonaws.com',
            source={
                'Fn::Join': [':', ['arn', 'aws', 's3', '', '', ingest_bucket_name]]},
            depends_on='CuboidImportLambda'
        )

    # Add topic indicating that the object store has been write locked.
    # Now using "production mailing list" instead of separate write lock topic.
    #config.add_sns_topic('WriteLock',
    #                     names.write_lock_topic,
    #                     names.write_lock,
    #                     []) # TODO: add subscribers

    return config
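
# Minimal driver sketch for the bosslet-style entry point above, assuming the
# repo's usual pattern of building a config from a bosslet and then generating
# the CloudFormation template. The import path, the BossConfiguration
# constructor call, and generate() are assumptions; match them to this repo's
# actual CLI before use.
def _example_generate(bosslet_name):
    from lib.configuration import BossConfiguration  # assumed import path

    bosslet_config = BossConfiguration(bosslet_name)  # assumed constructor
    config = create_config(bosslet_config, user_data=None)
    config.generate()  # assumed: render/write the template for review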
def create_config(session, domain, keypair=None, user_data=None):
    """
    Create the CloudFormationConfiguration object.

    Args:
        session: amazon session object
        domain: domain of the stack being created
        keypair: keypair used by instances being created
        user_data (UserData): information used by the endpoint instance and
            vault.  Data will be run through the CloudFormation Fn::Join
            template intrinsic function so other template intrinsic functions
            used in the user_data will be parsed and executed.

    Returns:
        the config for the CloudFormation stack
    """

    # Prepare user data for parsing by CloudFormation.
    if user_data is not None:
        parsed_user_data = {
            "Fn::Join": ["", user_data.format_for_cloudformation()]
        }
    else:
        parsed_user_data = user_data

    names = AWSNames(domain)
    config = CloudFormationConfiguration("cachedb", domain, const.REGION)

    vpc_id = config.find_vpc(session)

    # Create several subnets for all the lambdas to use.
    lambda_azs = aws.azs_lookup(session, lambda_compatible_only=True)
    internal_route_table_id = aws.rt_lookup(session, vpc_id, names.internal)

    print("AZs for lambda: " + str(lambda_azs))
    lambda_subnets = []
    for i in range(const.LAMBDA_SUBNETS):
        key = 'LambdaSubnet{}'.format(i)
        lambda_subnets.append(Ref(key))
        config.add_subnet(key, names.subnet('lambda{}'.format(i)),
                          az=lambda_azs[i % len(lambda_azs)][0])
        config.add_route_table_association(key + "RTA",
                                           internal_route_table_id,
                                           Ref(key))

    # Lookup the Internal Subnet and Internal Security Group IDs that are
    # needed by other resources
    internal_subnet_id = aws.subnet_id_lookup(session, names.subnet("internal"))
    config.add_arg(
        Arg.Subnet("InternalSubnet", internal_subnet_id,
                   "ID of Internal Subnet to create resources in"))

    internal_sg_id = aws.sg_lookup(session, vpc_id, names.internal)
    config.add_arg(
        Arg.SecurityGroup("InternalSecurityGroup", internal_sg_id,
                          "ID of internal Security Group"))

    role = aws.role_arn_lookup(session, "lambda_cache_execution")
    config.add_arg(
        Arg.String("LambdaCacheExecutionRole", role,
                   "IAM role for multilambda." + domain))
    index_bucket_name = names.cuboid_bucket
    if not aws.s3_bucket_exists(session, index_bucket_name):
        config.add_s3_bucket("cuboidBucket", index_bucket_name)
    config.add_s3_bucket_policy("cuboidBucketPolicy", index_bucket_name,
                                ['s3:GetObject', 's3:PutObject'],
                                {'AWS': role})

    delete_bucket_name = names.delete_bucket
    if not aws.s3_bucket_exists(session, delete_bucket_name):
        config.add_s3_bucket("deleteBucket", delete_bucket_name)
    config.add_s3_bucket_policy("deleteBucketPolicy", delete_bucket_name,
                                ['s3:GetObject', 's3:PutObject'],
                                {'AWS': role})

    creating_tile_bucket = False
    tile_bucket_name = names.tile_bucket
    if not aws.s3_bucket_exists(session, tile_bucket_name):
        creating_tile_bucket = True
        config.add_s3_bucket("tileBucket", tile_bucket_name)
    config.add_s3_bucket_policy("tileBucketPolicy", tile_bucket_name,
                                ['s3:GetObject', 's3:PutObject'],
                                {'AWS': role})

    ingest_bucket_name = names.ingest_bucket
    if not aws.s3_bucket_exists(session, ingest_bucket_name):
        config.add_s3_bucket("ingestBucket", ingest_bucket_name)
    config.add_s3_bucket_policy("ingestBucketPolicy", ingest_bucket_name,
                                ['s3:GetObject', 's3:PutObject'],
                                {'AWS': role})

    config.add_ec2_instance("CacheManager",
                            names.cache_manager,
                            aws.ami_lookup(session, "cachemanager.boss"),
                            keypair,
                            subnet=Ref("InternalSubnet"),
                            public_ip=False,
                            type_=const.CACHE_MANAGER_TYPE,
                            security_groups=[Ref("InternalSecurityGroup")],
                            user_data=parsed_user_data,
                            role="cachemanager")

    # Only look up the lambda code bucket once and reuse it below.
    lambda_bucket = aws.get_lambda_s3_bucket(session)
    config.add_lambda("MultiLambda",
                      names.multi_lambda,
                      Ref("LambdaCacheExecutionRole"),
                      s3=(lambda_bucket,
                          "multilambda.{}.zip".format(domain),
                          "lambda_loader.handler"),
                      timeout=120,
                      memory=1024,
                      security_groups=[Ref('InternalSecurityGroup')],
                      subnets=lambda_subnets,
                      runtime='python3.6')

    if creating_tile_bucket:
        config.add_lambda_permission(
            'tileBucketInvokeMultiLambda', names.multi_lambda,
            principal='s3.amazonaws.com',
            source={
                'Fn::Join': [':', ['arn', 'aws', 's3', '', '', tile_bucket_name]]
            }, #DP TODO: move into constants
            depends_on=['tileBucket', 'MultiLambda'])
    else:
        config.add_lambda_permission(
            'tileBucketInvokeMultiLambda', names.multi_lambda,
            principal='s3.amazonaws.com',
            source={
                'Fn::Join': [':', ['arn', 'aws', 's3', '', '', tile_bucket_name]]
            },
            depends_on='MultiLambda')

    # Add topic indicating that the object store has been write locked.
    # Now using "production mailing list" instead of separate write lock topic.
    #config.add_sns_topic('WriteLock',
    #                     names.write_lock_topic,
    #                     names.write_lock,
    #                     []) # TODO: add subscribers

    return config
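
# Illustration only (plain Python, not a template intrinsic): what the
# Fn::Join used above for the lambda permission's source ARN resolves to.
# The bucket name is hypothetical.
def _example_bucket_arn():
    bucket = 'tiles.example.boss'  # hypothetical bucket name
    arn = ':'.join(['arn', 'aws', 's3', '', '', bucket])
    assert arn == 'arn:aws:s3:::tiles.example.boss'
    return arn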
def create_config(bosslet_config):
    """Create the CloudFormationConfiguration object.

    :arg bosslet_config BossConfiguration for the target bosslet,
         used to perform lookups
    """
    config = CloudFormationConfiguration('cloudwatch', bosslet_config)
    names = bosslet_config.names
    session = bosslet_config.session

    vpc_id = config.find_vpc()
    lambda_subnets, _ = config.find_all_subnets(compatibility='lambda')

    internal_sg = aws.sg_lookup(session, vpc_id, names.internal.sg)

    loadbalancer_name = names.endpoint_elb.dns
    if not aws.lb_lookup(session, loadbalancer_name):
        raise MissingResourceError('ELB', loadbalancer_name)

    # TODO Test that MailingListTopic is working.
    production_mailing_list = bosslet_config.ALERT_TOPIC
    mailing_list_arn = aws.sns_topic_lookup(session, production_mailing_list)
    if mailing_list_arn is None:
        raise MissingResourceError('SNS topic', bosslet_config.ALERT_TOPIC)

    config.add_cloudwatch(loadbalancer_name, [mailing_list_arn])

    lambda_role = aws.role_arn_lookup(session, 'VaultConsulHealthChecker')
    config.add_arg(Arg.String(
        'VaultConsulHealthChecker', lambda_role,
        'IAM role for vault/consul health check'))

    config.add_lambda('VaultLambda',
                      names.vault_monitor.lambda_,
                      description='Check health of vault instances.',
                      timeout=30,
                      role=Ref('VaultConsulHealthChecker'),
                      security_groups=[internal_sg],
                      subnets=lambda_subnets,
                      handler='index.lambda_handler',
                      file=const.VAULT_LAMBDA)

    # Lambda input data
    json_str = json.dumps({
        'hostname': names.vault.dns,
    })

    config.add_cloudwatch_rule('VaultCheck',
                               name=names.vault_check.cw,
                               description='Check health of vault instances.',
                               targets=[
                                   {
                                       'Arn': Arn('VaultLambda'),
                                       'Id': names.vault_monitor.lambda_,
                                       'Input': json_str
                                   },
                               ],
                               schedule='rate(2 minutes)',
                               depends_on=['VaultLambda'])

    config.add_lambda_permission('VaultPerms',
                                 names.vault_monitor.lambda_,
                                 principal='events.amazonaws.com',
                                 source=Arn('VaultCheck'))

    return config
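
# Sketch of the monitor lambda's receiving side (an assumption, not this
# repo's actual handler code): a CloudWatch Events rule delivers the static
# 'Input' string above, already parsed, as the handler's event argument.
def lambda_handler(event, context):
    hostname = event['hostname']  # e.g. 'vault.example.boss' for a hypothetical bosslet
    print('checking vault health at {}'.format(hostname))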
def create_config(session, domain):
    """Create the CloudFormationConfiguration object.

    :arg session used to perform lookups
    :arg domain DNS name of vpc
    """
    config = CloudFormationConfiguration('cloudwatch', domain)
    names = AWSNames(domain)

    vpc_id = config.find_vpc(session)
    lambda_subnets, _ = config.find_all_availability_zones(session,
                                                           lambda_compatible_only=True)
    internal_sg = aws.sg_lookup(session, vpc_id, names.internal)

    loadbalancer_name = names.endpoint_elb
    if not aws.lb_lookup(session, loadbalancer_name):
        raise Exception("Invalid load balancer name: " + loadbalancer_name)

    # TODO Test that MailingListTopic is working.
    production_mailing_list = const.PRODUCTION_MAILING_LIST
    mailing_list_arn = aws.sns_topic_lookup(session, production_mailing_list)
    if mailing_list_arn is None:
        #config.add_sns_topic("topicList", production_mailing_list)
        msg = "MailingList {} needs to be created before running config"
        raise Exception(msg.format(const.PRODUCTION_MAILING_LIST))

    config.add_cloudwatch(loadbalancer_name, [mailing_list_arn])

    lambda_role = aws.role_arn_lookup(session, 'VaultConsulHealthChecker')
    config.add_arg(Arg.String(
        'VaultConsulHealthChecker', lambda_role,
        'IAM role for vault/consul health check.' + domain))

    config.add_lambda('VaultLambda',
                      names.vault_monitor,
                      description='Check health of vault instances.',
                      timeout=30,
                      role=Ref('VaultConsulHealthChecker'),
                      security_groups=[internal_sg],
                      subnets=lambda_subnets,
                      handler='index.lambda_handler',
                      file=const.VAULT_LAMBDA)

    config.add_lambda('ConsulLambda',
                      names.consul_monitor,
                      description='Check health of consul instances.',
                      timeout=30,
                      role=Ref('VaultConsulHealthChecker'),
                      security_groups=[internal_sg],
                      subnets=lambda_subnets,
                      handler='index.lambda_handler',
                      file=const.CONSUL_LAMBDA)

    # Lambda input data
    json_str = json.dumps({
        'vpc_id': vpc_id,
        'vpc_name': domain,
        'topic_arn': mailing_list_arn,
    })

    config.add_cloudwatch_rule('VaultConsulCheck',
                               name=names.vault_consul_check,
                               description='Check health of vault and consul instances.',
                               targets=[
                                   {
                                       'Arn': Arn('VaultLambda'),
                                       'Id': names.vault_monitor,
                                       'Input': json_str
                                   },
                                   {
                                       'Arn': Arn('ConsulLambda'),
                                       'Id': names.consul_monitor,
                                       'Input': json_str
                                   },
                               ],
                               schedule='rate(1 minute)',
                               depends_on=['VaultLambda', 'ConsulLambda'])

    config.add_lambda_permission('VaultPerms',
                                 names.vault_monitor,
                                 principal='events.amazonaws.com',
                                 source=Arn('VaultConsulCheck'))
    config.add_lambda_permission('ConsulPerms',
                                 names.consul_monitor,
                                 principal='events.amazonaws.com',
                                 source=Arn('VaultConsulCheck'))

    return config
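
# Illustration only (hypothetical IDs/ARNs): both rule targets above receive
# the same static Input string; this is what json_str would look like for a
# vpc named 'integration.boss'.
import json

def _example_rule_input():
    return json.dumps({
        'vpc_id': 'vpc-0123456789abcdef0',                               # hypothetical
        'vpc_name': 'integration.boss',                                  # hypothetical
        'topic_arn': 'arn:aws:sns:us-east-1:123456789012:example-list',  # hypothetical
    })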
def create_config(bosslet_config):
    """Create the CloudFormationConfiguration object.

    :arg bosslet_config BossConfiguration for the target bosslet
    """
    names = bosslet_config.names
    session = bosslet_config.session

    config = CloudFormationConfiguration('redis', bosslet_config)

    vpc_id = config.find_vpc()
    internal_subnets, _ = config.find_all_subnets()
    sgs = aws.sg_lookup_all(session, vpc_id)

    # Create the Cache and CacheState Redis Clusters
    REDIS_PARAMETERS = {
        "maxmemory-policy": "volatile-lru",
        "reserved-memory-percent": str(const.REDIS_RESERVED_MEMORY_PERCENT),
        "maxmemory-samples": "5", # ~ 5 - 10
    }

    config.add_redis_replication("Cache",
                                 names.cache.redis,
                                 internal_subnets,
                                 [sgs[names.internal.sg]],
                                 type_=const.REDIS_CACHE_TYPE,
                                 version="3.2.4",
                                 clusters=const.REDIS_CLUSTER_SIZE,
                                 parameters=REDIS_PARAMETERS)

    config.add_redis_replication("CacheState",
                                 names.cache_state.redis,
                                 internal_subnets,
                                 [sgs[names.internal.sg]],
                                 type_=const.REDIS_TYPE,
                                 version="3.2.4",
                                 clusters=const.REDIS_CLUSTER_SIZE)

    # This one may not be created depending on the scenario type.
    if const.REDIS_SESSION_TYPE is not None:
        config.add_redis_replication("CacheSession",
                                     names.cache_session.redis,
                                     internal_subnets,
                                     [sgs[names.internal.sg]],
                                     type_=const.REDIS_SESSION_TYPE,
                                     version="3.2.4",
                                     clusters=1)

    if const.REDIS_THROTTLE_TYPE is not None:
        internal_sg = aws.sg_lookup(session, vpc_id, names.internal.sg)
        lambda_subnets, _ = config.find_all_subnets(compatibility='lambda')

        config.add_redis_replication("CacheThrottle",
                                     names.cache_throttle.redis,
                                     internal_subnets,
                                     [sgs[names.internal.sg]],
                                     type_=const.REDIS_THROTTLE_TYPE,
                                     version="3.2.4",
                                     clusters=1)

        config.add_lambda("CacheThrottleLambda",
                          names.cache_throttle.lambda_,
                          aws.role_arn_lookup(session, 'lambda_basic_execution'),
                          description="Reset Boss throttling metrics",
                          security_groups=[internal_sg],
                          subnets=lambda_subnets,
                          handler='index.handler',
                          timeout=120,
                          memory=1024)

        # Schedule the lambda to be executed at midnight for the timezone
        # where the bosslet is located
        hour = TIMEZONE_OFFSET.get(bosslet_config.REGION, 0)
        schedule = 'cron(0 {} * * ? *)'.format(hour)
        config.add_cloudwatch_rule('CacheThrottleReset',
                                   name=names.cache_throttle.cw,
                                   description='Reset the current Boss throttling metrics',
                                   targets=[
                                       {
                                           'Arn': Arn('CacheThrottleLambda'),
                                           'Id': names.cache_throttle.lambda_,
                                           'Input': json.dumps({'host': names.cache_throttle.redis}),
                                       },
                                   ],
                                   schedule=schedule,
                                   depends_on=['CacheThrottleLambda'])

        config.add_lambda_permission('CacheThrottlePerms',
                                     names.cache_throttle.lambda_,
                                     principal='events.amazonaws.com',
                                     source=Arn('CacheThrottleReset'))

    return config
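
# Worked example of the schedule computed above, under the assumption that
# TIMEZONE_OFFSET maps a region to its hour offset from UTC (the value 5 here
# is hypothetical). CloudWatch cron fields are:
# minute hour day-of-month month day-of-week year.
def _example_schedule():
    TIMEZONE_OFFSET = {'us-east-1': 5}  # assumed shape of the real mapping
    hour = TIMEZONE_OFFSET.get('us-east-1', 0)
    schedule = 'cron(0 {} * * ? *)'.format(hour)
    assert schedule == 'cron(0 5 * * ? *)'  # fires 05:00 UTC == midnight US/Eastern (EST)
    return schedule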