# Excerpted Lambda handlers; names referenced below without definitions (e.g.
# running_job_exists, table, jobs_table, athena_client, and the uuid/json/
# random/os imports) are assumed to exist at module level.
def delete_data_mapper_handler(event, context):
    if running_job_exists():
        raise ValueError("Cannot delete Data Mappers whilst there is a job in progress")
    data_mapper_id = event["pathParameters"]["data_mapper_id"]
    table.delete_item(Key={"DataMapperId": data_mapper_id})

    return {"statusCode": 204}
Example #2
def cancel_handler(event, context):
    if running_job_exists():
        raise ValueError(
            "Cannot delete matches whilst there is a job in progress")
    body = event["body"]
    matches = body["Matches"]
    with deletion_queue_table.batch_writer() as batch:
        for match in matches:
            batch.delete_item(
                Key={"DeletionQueueItemId": match["DeletionQueueItemId"]})

    return {"statusCode": 204}
def process_handler(event, context):
    if running_job_exists():
        raise ValueError("There is already a job in progress")

    job_id = str(uuid.uuid4())
    config = get_config()
    deletion_queue_key = 'jobs/{}/deletion_queue/data.json'.format(job_id)
    item = {
        "Id": job_id,
        "Sk": job_id,
        "Type": "Job",
        "JobStatus": "QUEUED",
        "GSIBucket": str(random.randint(0, bucket_count - 1)),
        "CreatedAt": utc_timestamp(),
        "DeletionQueueBucket": deletion_queue_bucket,
        "DeletionQueueKey": deletion_queue_key,
        "DeletionQueueItemsSkipped": False,
        "CreatedBy": get_user_info(event),
        **{
            k: v
            for k, v in config.items() if k not in ["JobDetailsRetentionDays"]
        }
    }

    if int(config.get("JobDetailsRetentionDays", 0)) > 0:
        item["Expires"] = utc_timestamp(days=config["JobDetailsRetentionDays"])

    deletion_queue_items = {"DeletionQueueItems": []}
    for extended_deletion_queue_item in get_deletion_queue():
        deletion_item = {
            "DeletionQueueItemId": extended_deletion_queue_item["DeletionQueueItemId"],
            "MatchId": extended_deletion_queue_item["MatchId"],
            "DataMappers": extended_deletion_queue_item["DataMappers"],
        }
        deletion_queue_items["DeletionQueueItems"].append(deletion_item)

    obj = s3.Object(deletion_queue_bucket, deletion_queue_key)
    obj.put(Body=json.dumps(deletion_queue_items))
    jobs_table.put_item(Item=item)

    # After persisting the job item to DynamoDB, echo the queued match IDs
    # back in the response body.
    item["DeletionQueueItems"] = [
        q_item["MatchId"] for q_item in deletion_queue_items["DeletionQueueItems"]
    ]

    return {"statusCode": 202, "body": json.dumps(item, cls=DecimalEncoder)}
Example #4
def test_it_returns_true_where_jobs_not_running(mock_table):
    mock_table.query.return_value = {"Items": []}
    assert not running_job_exists()
    mock_table.query.assert_called_with(
        IndexName=ANY,
        KeyConditionExpression=ANY,
        ScanIndexForward=False,
        FilterExpression="(#s = :r) or (#s = :q) or (#s = :c)",
        ExpressionAttributeNames={"#s": "JobStatus"},
        ExpressionAttributeValues={
            ":r": "RUNNING",
            ":q": "QUEUED",
            ":c": "FORGET_COMPLETED_CLEANUP_IN_PROGRESS",
        },
        Limit=1,
    )
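Given the query shape pinned down by this test, a minimal sketch of running_job_exists might look like the following; the table handle, index_name and bucket_count are assumptions, while the filter expression and status values come straight from the assertion above.
from boto3.dynamodb.conditions import Key

def running_job_exists():
    # Sketch only: check each GSIBucket partition of the jobs index for any job
    # still in an active state. The index name, table handle and bucket_count
    # are assumed module-level definitions; the filter mirrors the test above.
    jobs = []
    for gsi_bucket in range(bucket_count):
        resp = table.query(
            IndexName=index_name,
            KeyConditionExpression=Key("GSIBucket").eq(str(gsi_bucket)),
            ScanIndexForward=False,
            FilterExpression="(#s = :r) or (#s = :q) or (#s = :c)",
            ExpressionAttributeNames={"#s": "JobStatus"},
            ExpressionAttributeValues={
                ":r": "RUNNING",
                ":q": "QUEUED",
                ":c": "FORGET_COMPLETED_CLEANUP_IN_PROGRESS",
            },
            Limit=1,
        )
        jobs += resp.get("Items", [])
    return len(jobs) > 0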
def process_handler(event, context):
    if running_job_exists():
        raise ValueError("There is already a job in progress")

    job_id = str(uuid.uuid4())
    config = get_config()
    item = {
        "Id": job_id,
        "Sk": job_id,
        "Type": "Job",
        "JobStatus": "QUEUED",
        "GSIBucket": str(random.randint(0, bucket_count - 1)),
        "CreatedAt": utc_timestamp(),
        "DeletionQueueItems": [],
        "DeletionQueueItemsSkipped": False,
        "CreatedBy": get_user_info(event),
        **{
            k: v
            for k, v in config.items() if k not in ["JobDetailsRetentionDays"]
        }
    }

    if int(config.get("JobDetailsRetentionDays", 0)) > 0:
        item["Expires"] = utc_timestamp(days=config["JobDetailsRetentionDays"])

    item_size_bytes = calculate_ddb_item_bytes(item)

    for deletion_queue_item in get_deletion_queue():
        current_size_bytes = calculate_ddb_item_bytes(deletion_queue_item)
        if item_size_bytes + current_size_bytes < max_size_bytes:
            item['DeletionQueueItems'].append(deletion_queue_item)
            item_size_bytes += current_size_bytes
        else:
            item['DeletionQueueItemsSkipped'] = True
            break

    jobs_table.put_item(Item=item)

    return {"statusCode": 202, "body": json.dumps(item, cls=DecimalEncoder)}
Example #6
def process_handler(event, context):
    if running_job_exists():
        raise ValueError("There is already a job in progress")

    job_id = str(uuid.uuid4())
    config = get_config()
    item = {
        "Id": job_id,
        "Sk": job_id,
        "Type": "Job",
        "JobStatus": "QUEUED",
        "GSIBucket": str(random.randint(0, bucket_count - 1)),
        "CreatedAt": utc_timestamp(),
        "CreatedBy": get_user_info(event),
        **{
            k: v
            for k, v in config.items() if k not in ["JobDetailsRetentionDays"]
        },
    }
    if int(config.get("JobDetailsRetentionDays", 0)) > 0:
        item["Expires"] = utc_timestamp(days=config["JobDetailsRetentionDays"])
    jobs_table.put_item(Item=item)
    return {"statusCode": 202, "body": json.dumps(item, cls=DecimalEncoder)}
def delete_data_mapper_handler(event, context):
    if running_job_exists():
        raise ValueError(
            "Cannot delete Data Mappers whilst there is a job in progress")
    data_mapper_id = event["pathParameters"]["data_mapper_id"]
    data_mapper = table.get_item(Key={"DataMapperId": data_mapper_id})["Item"]
    query = "DROP TABLE {}.{}".format(data_mapper["DeletionQueueDb"],
                                      data_mapper["DeletionQueueTableName"])
    response = athena_client.start_query_execution(
        QueryString=query,
        ResultConfiguration={
            "OutputLocation": "s3://{bucket}/{prefix}/".format(
                bucket=s3f2_flow_bucket, prefix="data_mappers/queries/drop/"
            )
        },
        WorkGroup=os.getenv("WorkGroup", "primary"),
    )
    if is_athena_query_successful(response):
        table.delete_item(Key={"DataMapperId": data_mapper_id})
    else:
        raise ValueError(
            "Failed to delete Deletion Queue Athena Table for Data Mapper")
    return {"statusCode": 204}