Example #1
0
def task_process_document_queues():
    """Poll active document queues and dispatch the oldest eligible
    pending document, respecting this node's concurrency limit.

    Stops after the first dispatch attempt because the queryset may be
    stale afterwards.
    """
    logger.debug("executed")
    # reset_orphans() disabled: causes problems with big clusters
    # (increased latency); re-enable once a better solution exists.
    q_pending = Q(state=QUEUEDOCUMENT_STATE_PENDING)
    q_delayed = Q(delay=True)
    # A delayed document only becomes eligible once REPLICATION_DELAY
    # seconds have passed since it was submitted.
    q_delay_interval = Q(datetime_submitted__lt=datetime.now() - timedelta(seconds=REPLICATION_DELAY))
    for document_queue in DocumentQueue.objects.filter(state=DOCUMENTQUEUE_STATE_ACTIVE):
        # Concurrency limit is per node: count only documents being
        # processed on this machine.
        current_local_processing_count = (
            QueueDocument.objects.filter(state=QUEUEDOCUMENT_STATE_PROCESSING).filter(node_name=platform.node()).count()
        )
        if current_local_processing_count < NODE_CONCURRENT_EXECUTION:
            try:
                # Eligible: pending and either not delayed, or delayed
                # long enough.
                oldest_queued_document_qs = document_queue.queuedocument_set.filter(
                    (q_pending & ~q_delayed) | (q_pending & q_delayed & q_delay_interval)
                )

                if oldest_queued_document_qs:
                    oldest_queued_document = oldest_queued_document_qs.order_by("datetime_submitted")[0]
                    process_job(task_process_queue_document, oldest_queued_document.pk)
            except Exception as exception:
                # Log instead of silently swallowing the error.
                logger.error("unhandled exception: %s" % exception)
            finally:
                # Don't process anymore from this queryset, might be stale
                break
        else:
            logger.debug("already processing maximum")
    else:
        logger.debug("nothing to process")
Example #2
0
def task_process_document_queues():
    """Scan the active document queues and hand the oldest eligible
    pending document to the job processor, unless this node is already
    running its maximum number of concurrent jobs.
    """
    logger.debug('executed')
    # TODO: reset_orphans()
    pending = Q(state=QUEUEDOCUMENT_STATE_PENDING)
    delayed = Q(delay=True)
    delay_expired = Q(
        datetime_submitted__lt=now() - timedelta(seconds=REPLICATION_DELAY))
    # Pending and either never delayed, or delayed long enough.
    eligible = (pending & ~delayed) | (pending & delayed & delay_expired)

    active_queues = DocumentQueue.objects.filter(
        state=DOCUMENTQUEUE_STATE_ACTIVE)
    for queue in active_queues:
        # The concurrency cap applies per node, so count only local work.
        busy_here = QueueDocument.objects.filter(
            state=QUEUEDOCUMENT_STATE_PROCESSING).filter(
                node_name=platform.node()).count()
        if busy_here < NODE_CONCURRENT_EXECUTION:
            try:
                candidates = queue.queuedocument_set.filter(eligible)
                if candidates:
                    oldest = candidates.order_by('datetime_submitted')[0]
                    process_job(task_process_queue_document, oldest.pk)
            except Exception as exception:
                logger.error('unhandled exception: %s' % exception)
            finally:
                # The queryset may be stale after a dispatch attempt;
                # stop instead of processing more from it.
                break
        else:
            logger.debug('already processing maximum')
    else:
        logger.debug('nothing to process')
Example #3
0
def task_process_document_queues():
    """For each active document queue, submit the oldest eligible
    pending document for processing, skipping queues while this node
    is at its concurrent-execution limit.
    """
    logger.debug('executed')
    # TODO: reset_orphans()
    is_pending = Q(state=QUEUEDOCUMENT_STATE_PENDING)
    is_delayed = Q(delay=True)
    delay_over = Q(datetime_submitted__lt=now() - timedelta(seconds=REPLICATION_DELAY))
    for document_queue in DocumentQueue.objects.filter(state=DOCUMENTQUEUE_STATE_ACTIVE):
        # Per-node limit: only count documents processing on this host.
        processing_locally = QueueDocument.objects.filter(
            state=QUEUEDOCUMENT_STATE_PROCESSING).filter(
            node_name=platform.node()).count()
        if processing_locally >= NODE_CONCURRENT_EXECUTION:
            logger.debug('already processing maximum')
            continue
        try:
            # Pending documents that are not delayed, or whose delay
            # window has elapsed.
            matches = document_queue.queuedocument_set.filter(
                (is_pending & ~is_delayed) | (is_pending & is_delayed & delay_over))
            if matches:
                first_in_line = matches.order_by('datetime_submitted')[0]
                process_job(task_process_queue_document, first_in_line.pk)
        except Exception as exception:
            logger.error('unhandled exception: %s' % exception)
        finally:
            # Don't process anymore from this queryset, might be stale
            break
    else:
        logger.debug('nothing to process')
Example #4
0
def task_process_document_queues():
    """Poll active document queues and dispatch the oldest eligible
    pending document, respecting this node's concurrency limit.

    Stops after the first dispatch attempt because the queryset may be
    stale afterwards.
    """
    logger.debug("executed")
    # TODO: reset_orphans()
    q_pending = Q(state=QUEUEDOCUMENT_STATE_PENDING)
    q_delayed = Q(delay=True)
    # A delayed document only becomes eligible once REPLICATION_DELAY
    # seconds have passed since it was submitted.
    q_delay_interval = Q(datetime_submitted__lt=datetime.now() - timedelta(seconds=REPLICATION_DELAY))
    for document_queue in DocumentQueue.objects.filter(state=DOCUMENTQUEUE_STATE_ACTIVE):
        # Concurrency limit is per node: count only documents being
        # processed on this machine.
        current_local_processing_count = (
            QueueDocument.objects.filter(state=QUEUEDOCUMENT_STATE_PROCESSING).filter(node_name=platform.node()).count()
        )
        if current_local_processing_count < NODE_CONCURRENT_EXECUTION:
            try:
                # Eligible: pending and either not delayed, or delayed
                # long enough.
                oldest_queued_document_qs = document_queue.queuedocument_set.filter(
                    (q_pending & ~q_delayed) | (q_pending & q_delayed & q_delay_interval)
                )

                if oldest_queued_document_qs:
                    oldest_queued_document = oldest_queued_document_qs.order_by("datetime_submitted")[0]
                    process_job(task_process_queue_document, oldest_queued_document.pk)
            except Exception as exception:
                logger.error("unhandled exception: %s" % exception)
            finally:
                # Don't process anymore from this queryset, might be stale
                break
        else:
            logger.debug("already processing maximum")
    else:
        logger.debug("nothing to process")
Example #5
0
def task_process_document_queues():
    logger.debug('executed')
    # reset_orphans()
    # Causes problems with big clusters increased latency
    # Disabled until better solution
    q_pending = Q(state=QUEUEDOCUMENT_STATE_PENDING)
    q_delayed = Q(delay=True)
    q_delay_interval = Q(datetime_submitted__lt=datetime.now() - timedelta(seconds=REPLICATION_DELAY))
    for document_queue in DocumentQueue.objects.filter(state=DOCUMENTQUEUE_STATE_ACTIVE):
        current_local_processing_count = QueueDocument.objects.filter(
            state=QUEUEDOCUMENT_STATE_PROCESSING).filter(
            node_name=platform.node()).count()
        if current_local_processing_count < NODE_CONCURRENT_EXECUTION:
            try:
                oldest_queued_document_qs = document_queue.queuedocument_set.filter(
                    (q_pending & ~q_delayed) | (q_pending & q_delayed & q_delay_interval))

                if oldest_queued_document_qs:
                    oldest_queued_document = oldest_queued_document_qs.order_by('datetime_submitted')[0]
                    process_job(task_process_queue_document, oldest_queued_document.pk)
            except Exception, e:
                pass
                #print 'DocumentQueueWatcher exception: %s' % e
            finally: