Example no. 1
def modify_schema_owner_password(event, context):
    """
    We don't know the password for 'capture_owner' on the production db,
    but we have already changed the postgres password in the modifyDbCluster step.
    So change the password for 'capture_owner' here.
    :param event:
    :param context:
    :return:
    """
    _validate()
    logger.info(event)
    original = secrets_client.get_secret_value(SecretId=NWCAPTURE_REAL)
    secret_string = json.loads(original['SecretString'])
    db_host = secret_string['DATABASE_ADDRESS']
    db_name = secret_string['DATABASE_NAME']
    postgres_password = secret_string['POSTGRES_PASSWORD']
    schema_owner_password = secret_string['SCHEMA_OWNER_PASSWORD']
    # Log connection metadata only; never log the password values themselves.
    logger.info(f"db_host {db_host} db_name {db_name}")
    # Connect as the 'postgres' superuser and reset the schema owner's password.
    rds = RDS(db_host, 'postgres', db_name, postgres_password)
    logger.info("got rds ok")
    sql = "alter user capture_owner with password %s"
    rds.alter_permissions(sql, (schema_owner_password,))

    # Purge any messages that accumulated on the capture trigger and error
    # queues while the database was being modified.
    queue_info = sqs_client.get_queue_url(QueueName=CAPTURE_TRIGGER_QUEUE)
    sqs_client.purge_queue(QueueUrl=queue_info['QueueUrl'])
    queue_info = sqs_client.get_queue_url(QueueName=ERROR_QUEUE)
    sqs_client.purge_queue(QueueUrl=queue_info['QueueUrl'])

    # Re-enable the capture Lambda trigger for the current stage.
    enable_lambda_trigger(TRIGGER[os.environ['STAGE']])
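
Example no. 1 relies on an RDS helper class that is not shown here. Below is a minimal sketch of what such a wrapper might look like, assuming a psycopg2-backed connection; the constructor arguments and the alter_permissions signature are taken from the call site above, everything else is an assumption.

# Hypothetical sketch of the RDS helper used above (not the project's actual class).
import psycopg2


class RDS:
    def __init__(self, db_host, db_user, db_name, db_password, port=5432):
        # Open a single connection; autocommit so the ALTER USER statement
        # takes effect immediately without an explicit commit.
        self.conn = psycopg2.connect(
            host=db_host, user=db_user, dbname=db_name,
            password=db_password, port=port
        )
        self.conn.autocommit = True

    def alter_permissions(self, sql, params=()):
        # Execute a parameterized statement such as
        # "alter user capture_owner with password %s".
        with self.conn.cursor() as cursor:
            cursor.execute(sql, params)

    def close(self):
        self.conn.close()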
Example no. 2
def _start_db(db, triggers, queue_name):
    """Start the given DB cluster, purge its queue, and re-enable the Lambda triggers."""
    purge_queue(queue_name)
    cluster_identifiers = describe_db_clusters("start")
    started = False
    for cluster_identifier in cluster_identifiers:
        if cluster_identifier == db:
            start_db_cluster(db)
            started = True
            enable_lambda_trigger(triggers)
    return started
def _start_db(db, triggers, queue_name):
    """
    Purging the queue was originally done for expense control on the test and QA tiers in the early days, but now that
    development is further along, we'd like to see these tiers coping with a more production-like backlog.
    """
    # purge_queue(queue_name)
    cluster_identifiers = describe_db_clusters("start")
    started = False
    for cluster_identifier in cluster_identifiers:
        if cluster_identifier == db:
            start_db_cluster(db)
            started = True
            enable_lambda_trigger(triggers)
    return started
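
Both variants of _start_db call describe_db_clusters and start_db_cluster helpers that are not shown. A plausible sketch follows, assuming they are thin wrappers around the boto3 RDS client; the "start" action filter is inferred from the call site, and mapping it to stopped clusters is an assumption.

# Hypothetical wrappers over the boto3 RDS client, inferred from the call sites above.
import boto3

rds_client = boto3.client('rds')


def describe_db_clusters(action):
    # Return identifiers of clusters eligible for the requested action:
    # stopped clusters can be started, available clusters can be stopped.
    wanted_status = 'stopped' if action == 'start' else 'available'
    response = rds_client.describe_db_clusters()
    return [
        cluster['DBClusterIdentifier']
        for cluster in response['DBClusters']
        if cluster['Status'] == wanted_status
    ]


def start_db_cluster(cluster_identifier):
    rds_client.start_db_cluster(DBClusterIdentifier=cluster_identifier)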
    # Excerpt from a test class; the @mock.patch decorator that supplies the
    # mock_boto argument (patching the module's boto3 client factory) is omitted here.
    def test_enable_lambda_trigger_already_enabled(self, mock_boto):
        client = mock.Mock()
        mock_boto.return_value = client
        client.list_event_source_mappings.return_value = self.mock_event_source_mapping
        client.get_event_source_mapping.return_value = {"State": "Enabled"}
        result = enable_lambda_trigger(["my_function_name"])
        assert result is False
        mock_boto.assert_called_with("lambda", "us-west-2")
        client.list_event_source_mappings.assert_called_with(
            FunctionName='my_function_name')
        client.update_event_source_mapping.assert_not_called()
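
The test above pins down the expected behaviour of enable_lambda_trigger without showing it. Here is a sketch consistent with those assertions, assuming boto3_client is a small factory equivalent to boto3.client(service, region_name=region); the factory name and the hard-coded region are assumptions.

# Hypothetical reconstruction of enable_lambda_trigger from the test's assertions.
import boto3


def boto3_client(service_name, region):
    return boto3.client(service_name, region_name=region)


def enable_lambda_trigger(function_names):
    client = boto3_client("lambda", "us-west-2")
    changed = False
    for function_name in function_names:
        mappings = client.list_event_source_mappings(FunctionName=function_name)
        for mapping in mappings['EventSourceMappings']:
            state = client.get_event_source_mapping(UUID=mapping['UUID'])['State']
            if state != "Enabled":
                # Only touch mappings that are not already enabled; if every
                # mapping is enabled already, the function returns False.
                client.update_event_source_mapping(UUID=mapping['UUID'], Enabled=True)
                changed = True
    return changed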
def enable_trigger(event, context):
    """Re-enable the capture Lambda trigger once the default DB cluster is available."""
    if _is_cluster_available(DEFAULT_DB_CLUSTER_IDENTIFIER):
        enable_lambda_trigger(TRIGGER[STAGE])
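
The enable_trigger handler depends on an _is_cluster_available helper that is not shown. A minimal sketch, assuming it checks the cluster status through the boto3 RDS client; treating any status other than 'available' as not ready is an assumption.

# Hypothetical availability check used by the handler above.
import boto3

rds_client = boto3.client('rds')


def _is_cluster_available(cluster_identifier):
    response = rds_client.describe_db_clusters(DBClusterIdentifier=cluster_identifier)
    clusters = response.get('DBClusters', [])
    # Consider the cluster ready only when RDS reports it as 'available'.
    return bool(clusters) and clusters[0]['Status'] == 'available'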