Example #1
async def handle_event(event, lambda_context):
    logger.info("Event: {}".format(json.dumps(event, indent=2)))

    async with aioaws.resource("s3") as s3_resource:
        records = [json.loads(record["body"]) for record in event["Records"]]
        await asyncio.gather(
            *[__gather(record, s3_resource) for record in records])
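The aioaws module used throughout these examples is not shown; a minimal sketch of such a helper, assuming it is just a thin wrapper around an aioboto3 Session (the module layout and any region or credential handling are assumptions):

# Hypothetical aioaws.py: thin wrapper around aioboto3 that matches the
# aioaws.resource(...) / aioaws.client(...) calls used in the examples.
import aioboto3

_session = aioboto3.Session()


def resource(service_name, **kwargs):
    # Returns an async context manager yielding a service resource,
    # e.g. async with aioaws.resource("s3") as s3_resource: ...
    return _session.resource(service_name, **kwargs)


def client(service_name, **kwargs):
    # Returns an async context manager yielding a low-level client,
    # e.g. async with aioaws.client("sqs") as sqs_client: ...
    return _session.client(service_name, **kwargs)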
Example #2
    def __exit__(self, exc_type, exc_value, tb):
        self.end = time.time_ns()
        self.duration_millis = int((self.end - self.start) / 1000 / 1000)
        if exc_type is None:
            logger.info("SUCCESS of \"%s\". Duration %d millis.", str(self.message), self.duration_millis)
        else:
            logger.info("FAILURE of \"%s\". Duration %d millis.", str(self.message), self.duration_millis,
                        exc_info=True)
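Example #3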
async def handle_event(event, lambda_context):
    logger.info("Event: {}".format(json.dumps(event, indent=2)))

    s3_objects = __get_s3_objects_from(event)
    batch_ids = {__extract_batch_id(key[1]) for key in s3_objects}

    async with aioaws.client("sqs") as sqs_client, \
        aioaws.resource("s3") as s3_resource:
        await asyncio.gather(*[
            __check_if_complete(batch_id, s3_resource, sqs_client)
            for batch_id in batch_ids
        ])
Example #4
async def handle_event(event, lambda_context):
    logger.info("Event: {}".format(json.dumps(event, indent=2)))

    s3_objects = __get_s3_objects_from(event)

    async with aioaws.resource("s3") as s3_resource, \
        aioaws.resource("dynamodb") as dynamodb_resource, \
        await items_table.new_batch_writer(dynamodb_resource) as batch_writer:
        await asyncio.gather(*[
            __process(s3_object, s3_resource, batch_writer)
            for s3_object in s3_objects
        ])
Example #5
async def handle_event(event, lambda_context):
    logger.info("Event: {}".format(json.dumps(event, indent=2)))

    async with aioaws.client("sqs") as sqs_client, \
        aioaws.resource("s3") as s3_resource, \
        aioaws.resource("dynamodb") as dynamodb_resource, \
        await items_table.new_batch_writer(dynamodb_resource) as batch_writer:
        chunks = [json.loads(record["body"]) for record in event["Records"]]
        await asyncio.gather(*[__process(chunk, s3_resource, batch_writer) for chunk in chunks])

        batch_ids = {chunk["batchId"] for chunk in chunks}
        await asyncio.gather(*[__check_if_complete(batch_id, s3_resource, sqs_client) for batch_id in batch_ids])
Example #6
def handle_event(event, lambda_context):
    logger.info("Event: {}".format(json.dumps(event, indent=2)))
    s3_object = __get_s3_object_from(event)
    if s3_object is None:
        return
    batch_id = __extract_batch_id(s3_object[1])
    record_batch_started(batch_id)
    with trace("Scattering {}", batch_id):
        batch_doc = input_bucket.read_batch_input(s3_object[0], s3_object[1])
        validate_input(batch_doc)
        records = batch_doc.get("records", [])
        work_bucket.write_batch_status(batch_id, len(records))
        __write_tasks_and_send_messages(batch_id, records)

    input_bucket.delete_batch_input(s3_object[0], s3_object[1])
    record_scatter_finished(batch_id, len(records))
Example #7
async def handle_event(event, lambda_context):
    logger.info("Event: {}".format(json.dumps(event, indent=2)))

    s3_object = __get_s3_object_from(event)
    if s3_object is None:
        logger.info("Is s3 test event. Skipping.")
        return

    batch_id = __extract_batch_id(s3_object[1])
    async with trace("Scattering {}", batch_id):
        async with aioaws.resource("s3") as s3_resource, aioaws.client("sqs") as sqs_client:
            batch_doc = await input_bucket.read_batch_input(s3_object[0], s3_object[1], s3_resource)
            validate_input(batch_doc)
            records = batch_doc.get("records", [])
            record_batch_started(batch_id)
            await work_bucket.write_batch_status(batch_id, len(records), CHUNK_SIZE, s3_resource)
            await __write_chunks(batch_id, records, s3_resource, sqs_client)
            await input_bucket.delete_batch_input(s3_object[0], s3_object[1], s3_resource)
    record_scatter_finished(batch_id, len(records))
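Example #8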
    async def _flush(self):
        if self._queue_url is None:
            response = await self._sqs_client.get_queue_url(QueueName=self._queue_name)
            self._queue_url = response["QueueUrl"]

        logger.info("Flushing buffer of %d message(s)...", len(self._messages_buffer))
        messages_to_send = self._messages_buffer[:self._flush_amount]
        self._messages_buffer = self._messages_buffer[self._flush_amount:]
        response = await self._sqs_client.send_message_batch(QueueUrl=self._queue_url, Entries=messages_to_send)
        retryable_ids = [entry["Id"] for entry in response.get("Failed", []) if entry["SenderFault"] is False]
        if len(retryable_ids) > 0:
            logger.info("%d message(s) retryable... putting back.", len(retryable_ids))
            retryable_messages = [m for m in messages_to_send if m["Id"] in retryable_ids]
            self._messages_buffer.extend(retryable_messages)

        sender_faults = [entry for entry in response.get("Failed", []) if entry["SenderFault"] is True]
        if len(sender_faults) > 0:
            sender_fault = sender_faults[0]
            raise ValueError(
                "Sending message {} failed: code={}, message={}".format(
                    sender_fault["Id"], sender_fault["Code"], sender_fault["Message"]))
Example #9
def handle_event(event, lambda_context):
    logger.info("Event: {}".format(json.dumps(event, indent=2)))
    records = event["Records"]
    with items_table.new_batch_writer() as batch_writer:
        for record in records:
            record = json.loads(record["body"])
            with trace("Processing {}", json.dumps(record)):
                index = record["index"]
                batch_id = record["batchId"]
                request = record["request"]
                item_no = request["itemNo"]
                items_table.put_item(
                    {
                        "itemNo": str(item_no),
                        "updateTimestamp": now_epoch_millis()
                    }, batch_writer)
                work_bucket.write_task_result(batch_id, index, request, {
                    "success": True,
                    "message": "Ok"
                })
                work_bucket.delete_pending_task(batch_id, index)
                if not work_bucket.exists_pending_task(batch_id):
                    gather_queue.send_batch_complete_message(batch_id)
Example #10
async def handle_event(event, lambda_context):
    logger.info("Event: {}".format(json.dumps(event, indent=2)))

    s3_object = __get_s3_object_from(event)
    if s3_object is None:
        return
    batch_id = __extract_batch_id(s3_object[1])
    async with trace("Scattering {}", batch_id):
        async with aioaws.resource("s3") as s3_resource, \
            aioaws.client("sqs") as sqs_client, \
            aioaws.resource("dynamodb") as dynamodb_resource:
            batch_doc = await input_bucket.read_batch_input(
                s3_object[0], s3_object[1], s3_resource)
            validate_input(batch_doc)
            records = batch_doc.get("records", [])
            record_batch_started(batch_id)
            await batch_status_table.put_batch_status(batch_id, len(records),
                                                      dynamodb_resource)
            await __write_chunks_and_send_messages(batch_id, records,
                                                   dynamodb_resource,
                                                   sqs_client)
            await input_bucket.delete_batch_input(s3_object[0], s3_object[1],
                                                  s3_resource)
    record_scatter_finished(batch_id, len(records))
Example #11
    def __enter__(self):
        self.start = time.time_ns()
        logger.info("START \"%s\"...", str(self.message))
        return self
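Examples #2 and #11 show only the __exit__ and __enter__ methods of the trace helper; a possible shape for the rest of the class, assuming __init__ pre-formats the message and that the async variants used in Examples #7 and #10 simply delegate to the synchronous ones:

class trace:  # hypothetical reconstruction around the __enter__/__exit__ shown above
    def __init__(self, message_format, *args):
        # e.g. trace("Scattering {}", batch_id) -> message "Scattering <batch_id>"
        self.message = message_format.format(*args)
        self.start = None
        self.end = None
        self.duration_millis = None

    async def __aenter__(self):
        # Allows async with trace(...) as in Examples #7 and #10.
        return self.__enter__()

    async def __aexit__(self, exc_type, exc_value, tb):
        return self.__exit__(exc_type, exc_value, tb)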