def create_config(session, domain, keypair=None):
    """
    Create the CloudFormationConfiguration object.

    Args:
        session: amazon session object
        domain (string): domain of the stack being created
        keypair: keypair used by instances being created

    Returns:
        the config for the Cloud Formation stack
    """
    names = AWSNames(domain)
    config = CloudFormationConfiguration('redis', domain, const.REGION)

    vpc_id = config.find_vpc(session)
    az_subnets, external_subnets = config.find_all_availability_zones(session)
    sgs = aws.sg_lookup_all(session, vpc_id)

    # Create the Cache and CacheState Redis Clusters
    REDIS_PARAMETERS = {
        "maxmemory-policy": "volatile-lru",
        "reserved-memory": str(get_scenario(const.REDIS_RESERVED_MEMORY, 0) * 1000000),
        "maxmemory-samples": "5",  # ~ 5 - 10
    }

    config.add_redis_replication("Cache",
                                 names.cache,
                                 az_subnets,
                                 [sgs[names.internal]],
                                 type_=const.REDIS_CACHE_TYPE,
                                 version="3.2.4",
                                 clusters=const.REDIS_CLUSTER_SIZE,
                                 parameters=REDIS_PARAMETERS)

    config.add_redis_replication("CacheState",
                                 names.cache_state,
                                 az_subnets,
                                 [sgs[names.internal]],
                                 type_=const.REDIS_TYPE,
                                 version="3.2.4",
                                 clusters=const.REDIS_CLUSTER_SIZE)

    return config
def create_config(session, domain, keypair=None):
    """
    Create the CloudFormationConfiguration object.

    Args:
        session: amazon session object
        domain (string): domain of the stack being created
        keypair: keypair used by instances being created

    Returns:
        the config for the Cloud Formation stack
    """
    names = AWSNames(domain)
    config = CloudFormationConfiguration('redis', domain, const.REGION)

    vpc_id = config.find_vpc(session)
    az_subnets, external_subnets = config.find_all_availability_zones(session)
    sgs = aws.sg_lookup_all(session, vpc_id)

    # Create the Cache and CacheState Redis Clusters
    REDIS_PARAMETERS = {
        "maxmemory-policy": "volatile-lru",
        "reserved-memory": str(get_scenario(const.REDIS_RESERVED_MEMORY, 0) * 1000000),
        "maxmemory-samples": "5",  # ~ 5 - 10
    }

    config.add_redis_replication("Cache",
                                 names.cache,
                                 az_subnets,
                                 [sgs[names.internal]],
                                 type_=const.REDIS_CACHE_TYPE,
                                 version="3.2.4",
                                 clusters=const.REDIS_CLUSTER_SIZE,
                                 parameters=REDIS_PARAMETERS)

    config.add_redis_replication("CacheState",
                                 names.cache_state,
                                 az_subnets,
                                 [sgs[names.internal]],
                                 type_=const.REDIS_TYPE,
                                 version="3.2.4",
                                 clusters=const.REDIS_CLUSTER_SIZE)

    # This one may not be created depending on the scenario type.
    if get_scenario(const.REDIS_SESSION_TYPE, None) is not None:
        config.add_redis_replication("CacheSession",
                                     names.cache_session,
                                     az_subnets,
                                     [sgs[names.internal]],
                                     type_=const.REDIS_SESSION_TYPE,
                                     version="3.2.4",
                                     clusters=1)

    return config
def create_config(bosslet_config):
    names = bosslet_config.names
    session = bosslet_config.session

    config = CloudFormationConfiguration('redis', bosslet_config)

    vpc_id = config.find_vpc()
    internal_subnets, external_subnets = config.find_all_subnets()
    sgs = aws.sg_lookup_all(session, vpc_id)

    # Create the Cache and CacheState Redis Clusters
    REDIS_PARAMETERS = {
        "maxmemory-policy": "volatile-lru",
        "reserved-memory-percent": str(const.REDIS_RESERVED_MEMORY_PERCENT),
        "maxmemory-samples": "5",  # ~ 5 - 10
    }

    config.add_redis_replication("Cache",
                                 names.cache.redis,
                                 internal_subnets,
                                 [sgs[names.internal.sg]],
                                 type_=const.REDIS_CACHE_TYPE,
                                 version="3.2.4",
                                 clusters=const.REDIS_CLUSTER_SIZE,
                                 parameters=REDIS_PARAMETERS)

    config.add_redis_replication("CacheState",
                                 names.cache_state.redis,
                                 internal_subnets,
                                 [sgs[names.internal.sg]],
                                 type_=const.REDIS_TYPE,
                                 version="3.2.4",
                                 clusters=const.REDIS_CLUSTER_SIZE)

    # This one may not be created depending on the scenario type.
    if const.REDIS_SESSION_TYPE is not None:
        config.add_redis_replication("CacheSession",
                                     names.cache_session.redis,
                                     internal_subnets,
                                     [sgs[names.internal.sg]],
                                     type_=const.REDIS_SESSION_TYPE,
                                     version="3.2.4",
                                     clusters=1)

    return config
def create_config(bosslet_config):
    names = bosslet_config.names
    session = bosslet_config.session

    config = CloudFormationConfiguration('redis', bosslet_config)

    vpc_id = config.find_vpc()
    internal_subnets, external_subnets = config.find_all_subnets()
    sgs = aws.sg_lookup_all(session, vpc_id)

    # Create the Cache and CacheState Redis Clusters
    REDIS_PARAMETERS = {
        "maxmemory-policy": "volatile-lru",
        "reserved-memory-percent": str(const.REDIS_RESERVED_MEMORY_PERCENT),
        "maxmemory-samples": "5",  # ~ 5 - 10
    }

    config.add_redis_replication("Cache",
                                 names.cache.redis,
                                 internal_subnets,
                                 [sgs[names.internal.sg]],
                                 type_=const.REDIS_CACHE_TYPE,
                                 version="3.2.4",
                                 clusters=const.REDIS_CLUSTER_SIZE,
                                 parameters=REDIS_PARAMETERS)

    config.add_redis_replication("CacheState",
                                 names.cache_state.redis,
                                 internal_subnets,
                                 [sgs[names.internal.sg]],
                                 type_=const.REDIS_TYPE,
                                 version="3.2.4",
                                 clusters=const.REDIS_CLUSTER_SIZE)

    # This one may not be created depending on the scenario type.
    if const.REDIS_SESSION_TYPE is not None:
        config.add_redis_replication("CacheSession",
                                     names.cache_session.redis,
                                     internal_subnets,
                                     [sgs[names.internal.sg]],
                                     type_=const.REDIS_SESSION_TYPE,
                                     version="3.2.4",
                                     clusters=1)

    if const.REDIS_THROTTLE_TYPE is not None:
        vpc_id = config.find_vpc()
        internal_sg = aws.sg_lookup(session, vpc_id, names.internal.sg)
        lambda_subnets, _ = config.find_all_subnets(compatibility='lambda')

        config.add_redis_replication("CacheThrottle",
                                     names.cache_throttle.redis,
                                     internal_subnets,
                                     [sgs[names.internal.sg]],
                                     type_=const.REDIS_THROTTLE_TYPE,
                                     version="3.2.4",
                                     clusters=1)

        config.add_lambda("CacheThrottleLambda",
                          names.cache_throttle.lambda_,
                          aws.role_arn_lookup(session, 'lambda_basic_execution'),
                          description="Reset Boss throttling metrics",
                          security_groups=[internal_sg],
                          subnets=lambda_subnets,
                          handler='index.handler',
                          timeout=120,
                          memory=1024)

        # Schedule the lambda to be executed at midnight for the timezone
        # where the bosslet is located
        hour = TIMEZONE_OFFSET.get(bosslet_config.REGION, 0)
        schedule = 'cron(0 {} * * ? *)'.format(hour)
        config.add_cloudwatch_rule('CacheThrottleReset',
                                   name=names.cache_throttle.cw,
                                   description='Reset the current Boss throttling metrics',
                                   targets=[
                                       {
                                           'Arn': Arn('CacheThrottleLambda'),
                                           'Id': names.cache_throttle.lambda_,
                                           'Input': json.dumps({'host': names.cache_throttle.redis}),
                                       },
                                   ],
                                   schedule=schedule,
                                   depends_on=['CacheThrottleLambda'])

        config.add_lambda_permission('CacheThrottlePerms',
                                     names.cache_throttle.lambda_,
                                     principal='events.amazonaws.com',
                                     source=Arn('CacheThrottleReset'))

    return config
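
# For context, a minimal sketch of the kind of handler the CacheThrottleReset
# rule invokes via 'index.handler'. The real handler ships separately with the
# Boss codebase; the 'throttle:*' key convention, the reset strategy, and the
# port below are assumptions for illustration only, not the actual implementation.
import redis

def handler(event, context):
    # The CloudWatch rule's Input supplies the CacheThrottle Redis endpoint.
    host = event['host']
    client = redis.StrictRedis(host=host, port=6379)

    # Assumed convention: throttling counters live under a common key prefix,
    # so resetting the metrics means deleting those keys.
    for key in client.scan_iter(match='throttle:*'):
        client.delete(key)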