def test_cache_incremented_by_key(self, cache):
        """Verify bump_batch increments the cache entry derived from the group key."""
        from furious.batcher import bump_batch

        # Simulate the cache reporting a post-increment count of 2.
        cache.incr.return_value = 2

        result = bump_batch('group')

        # The key must be namespaced with the batch prefix, and the
        # incremented count must be passed straight back to the caller.
        cache.incr.assert_called_once_with('agg-batch-group')
        self.assertEqual(result, 2)
    def test_cache_incremented_by_key(self, cache):
        """Ensure that the cache object is incremented by the key passed in."""
        from furious.batcher import bump_batch

        cache.incr.return_value = 2

        val = bump_batch('group')

        self.assertEqual(val, 2)

        cache.incr.assert_called_once_with('agg-batch-group')
Example #3
0
def process_messages(tag, retries=0):
    """Process the messages pulled from a queue based off the tag passed in.

    Re-inserts another processor if any work was processed or the retry
    count is under the max retry count. Updates an aggregated stats object
    with the data in the payload of the messages processed.

    :param tag: :class: `str` Tag to query the queue on
    :param retries: :class: `int` Number of retries the job has processed
    """
    from furious.batcher import bump_batch
    from furious.batcher import MESSAGE_DEFAULT_QUEUE
    from furious.batcher import MessageIterator
    from furious.batcher import MessageProcessor

    from google.appengine.api import memcache

    # Since we don't have a flag for checking completion we re-insert a
    # processor task with a retry count to catch any work that may still be
    # filtering in. If we've hit our max retry count we just bail out and
    # consider the job complete.
    if retries > 5:
        logging.info("Process messages hit max retry and is exiting")
        return

    # Create a message iterator for the tag in batches of 500.
    message_iterator = MessageIterator(tag, MESSAGE_DEFAULT_QUEUE, 500)

    client = memcache.Client()

    # Fetch the stats object with gets() so a later cas() can perform a
    # safe compare-and-set update against it.
    stats = client.gets(tag)

    # JSON-decode it if it exists, otherwise start from the default state.
    stats = json.loads(stats) if stats else get_default_stats()

    work_processed = False

    # Loop through the messages pulled from the queue.
    for message in message_iterator:
        work_processed = True

        value = int(message.get("value", 0))
        color = message.get("color").lower()

        # Update the total stats with the value pulled.
        set_stats(stats["totals"], value)

        # Update the specific color stats via the value pulled.
        set_stats(stats["colors"][color], value)

    # Serialize the stats before writing them back to cache.
    json_stats = json.dumps(stats)

    # Try an add() first to see if the key is new. We can't trust a plain
    # get/set due to a race condition.
    if not client.add(tag, json_stats):
        # The key already existed, so do a compare-and-set to safely
        # update the stats.
        if not client.cas(tag, json_stats):
            raise Exception("Transaction Collision.")

    # Bump the process batch id.
    bump_batch(tag)

    if work_processed:
        # Reset the retries as we've processed work.
        retries = 0
    else:
        # No work was processed so increment the retries.
        retries += 1

    # Insert another processor for the SAME tag. (Previously this
    # hard-coded "colors", which silently broke the retry chain for any
    # other tag; existing callers passing "colors" are unaffected.)
    processor = MessageProcessor(target=process_messages,
                                 args=(tag,),
                                 kwargs={'retries': retries},
                                 tag=tag)

    processor.start()
Example #4
0
def process_messages(tag, retries=0):
    """Process the messages pulled from a queue based off the tag passed in.

    Re-inserts another processor if any work was processed or the retry
    count is under the max retry count. Updates an aggregated stats object
    with the data in the payload of the messages processed.

    :param tag: :class: `str` Tag to query the queue on
    :param retries: :class: `int` Number of retries the job has processed
    """
    from furious.batcher import bump_batch
    from furious.batcher import MESSAGE_DEFAULT_QUEUE
    from furious.batcher import MessageIterator
    from furious.batcher import MessageProcessor

    from google.appengine.api import memcache

    # Since we don't have a flag for checking completion we re-insert a
    # processor task with a retry count to catch any work that may still be
    # filtering in. If we've hit our max retry count we just bail out and
    # consider the job complete.
    if retries > 5:
        logging.info("Process messages hit max retry and is exiting")
        return

    # Create a message iterator for the tag in batches of 500.
    message_iterator = MessageIterator(tag, MESSAGE_DEFAULT_QUEUE, 500)

    client = memcache.Client()

    # Fetch the stats object with gets() so a later cas() can perform a
    # safe compare-and-set update against it.
    stats = client.gets(tag)

    # JSON-decode it if it exists, otherwise start from the default state.
    stats = json.loads(stats) if stats else get_default_stats()

    work_processed = False

    # Loop through the messages pulled from the queue.
    for message in message_iterator:
        work_processed = True

        value = int(message.get("value", 0))
        color = message.get("color").lower()

        # Update the total stats with the value pulled.
        set_stats(stats["totals"], value)

        # Update the specific color stats via the value pulled.
        set_stats(stats["colors"][color], value)

    # Serialize the stats before writing them back to cache.
    json_stats = json.dumps(stats)

    # Try an add() first to see if the key is new. We can't trust a plain
    # get/set due to a race condition.
    if not client.add(tag, json_stats):
        # The key already existed, so do a compare-and-set to safely
        # update the stats.
        if not client.cas(tag, json_stats):
            raise Exception("Transaction Collision.")

    # Bump the process batch id.
    bump_batch(tag)

    if work_processed:
        # Reset the retries as we've processed work.
        retries = 0
    else:
        # No work was processed so increment the retries.
        retries += 1

    # Insert another processor for the SAME tag. (Previously this
    # hard-coded "colors", which silently broke the retry chain for any
    # other tag; existing callers passing "colors" are unaffected.)
    processor = MessageProcessor(
        target=process_messages, args=(tag,),
        kwargs={'retries': retries}, tag=tag)

    processor.start()