def test_describe_db_clusters(self, mock_boto):
    client = mock.Mock()
    mock_boto.return_value = client
    client.describe_db_clusters.return_value = {
        'DBClusters': [{
            'DBClusterIdentifier': 'foo',
            'Status': 'available'
        }]
    }
    describe_db_clusters("stop")
    client.describe_db_clusters.assert_called_once_with()
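
# The test above only confirms that describe_db_clusters("stop") calls the RDS
# client's describe_db_clusters() with no arguments. The sketch below is one
# plausible shape for that helper, assuming it wraps boto3 and filters clusters
# by status; the status-per-action mapping is an assumption, not the project's
# confirmed logic.
import boto3


def describe_db_clusters_sketch(action):
    """Return identifiers of clusters eligible for the given action (sketch)."""
    # Assumption: clusters that are 'available' can be stopped, and clusters
    # that are 'stopped' can be started.
    wanted_status = 'available' if action == 'stop' else 'stopped'
    client = boto3.client('rds')
    response = client.describe_db_clusters()
    return [
        cluster['DBClusterIdentifier']
        for cluster in response['DBClusters']
        if cluster['Status'] == wanted_status
    ]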
def control_db_utilization(event, context):
    """
    Right now we are only listening for the error handler alarm, because it is
    the last alarm to get triggered when we are going into a death spiral.
    :param event:
    :param context:
    :return:
    """
    logger.info(event)
    alarm_state = event["detail"]["state"]["value"]
    stage = os.getenv('STAGE')
    if stage not in STAGES:
        raise Exception(f"stage not recognized {stage}")
    if alarm_state == "ALARM":
        logger.info(
            f"Disabling trigger {TRIGGER[stage]} because error handler is in alarm"
        )
        disable_triggers(TRIGGER[stage])
    elif alarm_state == "OK":
        # We do NOT want to enable the trigger if the db is not up and running.
        active_dbs = describe_db_clusters('stop')
        if DB[stage] in active_dbs:
            logger.info(
                f"Enabling trigger {TRIGGER[stage]} for {DB[stage]} because error handler is okay"
            )
            enable_triggers(TRIGGER[stage])
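
# control_db_utilization is driven by a CloudWatch alarm state-change event
# (delivered via EventBridge). The handler only reads
# event["detail"]["state"]["value"], so a minimal invocation sketch looks like
# the following; the alarm name and surrounding fields are illustrative, not
# taken from the project.
sample_alarm_event = {
    "source": "aws.cloudwatch",
    "detail-type": "CloudWatch Alarm State Change",
    "detail": {
        "alarmName": "error-handler-alarm",  # hypothetical alarm name
        "state": {"value": "ALARM"},
    },
}
control_db_utilization(sample_alarm_event, context=None)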
def _stop_db(db, triggers):
    cluster_identifiers = describe_db_clusters("stop")
    stopped = False
    disable_lambda_trigger(triggers)
    for cluster_identifier in cluster_identifiers:
        if cluster_identifier == db:
            stop_db_cluster(db)
            stopped = True
    return stopped
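
# _stop_db and _start_db toggle Lambda triggers around the cluster operation.
# The project's disable_lambda_trigger/enable_lambda_trigger helpers are not
# shown here; the sketch below assumes they flip the Enabled flag on each
# function's event source mappings, which is one common way to implement this.
import boto3


def set_lambda_trigger_enabled_sketch(function_names, enabled):
    """Enable or disable every event source mapping for the given functions (sketch)."""
    client = boto3.client('lambda')
    for function_name in function_names:
        mappings = client.list_event_source_mappings(FunctionName=function_name)
        for mapping in mappings['EventSourceMappings']:
            client.update_event_source_mapping(
                UUID=mapping['UUID'],
                Enabled=enabled,
            )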
def _start_db(db, triggers, queue_name):
    purge_queue(queue_name)
    cluster_identifiers = describe_db_clusters("start")
    started = False
    for cluster_identifier in cluster_identifiers:
        if cluster_identifier == db:
            start_db_cluster(db)
            started = True
    enable_lambda_trigger(triggers)
    return started
def troubleshoot(event, context):
    if event['action'].lower() == 'start_capture_db':
        cluster_identifiers = describe_db_clusters("start")
        for cluster_identifier in cluster_identifiers:
            if cluster_identifier == DB[STAGE]:
                start_db_cluster(DB[STAGE])
    elif event['action'].lower() == 'stop_capture_db':
        cluster_identifiers = describe_db_clusters("stop")
        for cluster_identifier in cluster_identifiers:
            if cluster_identifier == DB[STAGE]:
                stop_db_cluster(DB[STAGE])
    elif event['action'].lower() == 'make_kms_key':
        _make_kms_key(event)
    elif event['action'].lower() == 'change_secret_kms_key':
        _change_secret_kms_key(event)
    elif event['action'].lower() == 'change_kms_key_policy':
        _change_kms_key_policy(event)
    elif event['action'].lower() == 'change_flow_rate':
        adjust_flow_rate(event['flow_rate'])
    # TODO remove
    elif event['action'].lower() == 'delete_stack':
        stack = event['stack']
        client = boto3.client('cloudformation', "us-west-2")
        client.delete_stack(StackName=stack)
    elif event['action'].lower() == 'purge_queues':
        purge_queue([CAPTURE_TRIGGER_QUEUE, ERROR_QUEUE])
    elif event['action'].lower() == 'create_access_point':
        _make_efs_access_point(event)
    elif event['action'].lower() == 'create_fargate_security_group':
        _make_fargate_security_group(event)
    elif event['action'].lower() == 'delete_fargate_security_group':
        # When you need to delete a security group, modify the code
        # here and specify the group id. Don't check into master
        client = boto3.client('ec2', os.getenv('AWS_DEPLOYMENT_REGION'))
        client.delete_security_group(GroupId='sg-xxxxxxxxxxxxxxxxx')
    elif event['action'].lower() == 'delete_access_point':
        # When you need to delete an efs access point, modify the code
        # here and specify the access point id. Don't check into master
        client = boto3.client('efs', os.getenv('AWS_DEPLOYMENT_REGION'))
        client.delete_access_point(AccessPointId='fsap-xxxxxxxxxxxxxxxxx')
    else:
        raise Exception(f"invalid action {event['action']}")
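
# troubleshoot is an operator-invoked handler dispatched on event['action'].
# A minimal sketch of invoking it directly follows; the only required key is
# 'action', plus whatever the chosen branch reads ('flow_rate', 'stack', etc.).
# The flow_rate value below is illustrative.
troubleshoot({'action': 'start_capture_db'}, context=None)
troubleshoot({'action': 'change_flow_rate', 'flow_rate': 10}, context=None)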
def _start_db(db, triggers, queue_name):
    """
    Purging the queue was originally done for expense control on the test and
    QA tiers in the early days, but now that development is further along,
    we'd like to see these tiers coping with a more production-like backlog.
    """
    # purge_queue(queue_name)
    cluster_identifiers = describe_db_clusters("start")
    started = False
    for cluster_identifier in cluster_identifiers:
        if cluster_identifier == db:
            start_db_cluster(db)
            started = True
    enable_lambda_trigger(triggers)
    return started