Code example #1
File: system.py Project: ra2003/mercury
def system_queue():
    user = auth.is_logged_in(request)
    # admin gate: the result is unused, so the call presumably raises on failure
    permission = auth.is_sys_admin(user)
    queue = Queue.select().order_by(Queue.site.asc(), Queue.blog.asc(),
                                    Queue.job_type.asc(),
                                    Queue.date_touched.desc())

    return listing(request, None, queue, 'queue', 'system_queue', user=user)
Code example #2
File: system.py Project: syegulalp/mercury
def system_queue():
    user = auth.is_logged_in(request)
    permission = auth.is_sys_admin(user)
    queue = Queue.select().order_by(Queue.site.asc(), Queue.blog.asc(),
                                    Queue.job_type.asc(),
                                    Queue.date_touched.desc())

    return listing(request, None, queue,
                   'queue', 'system_queue',
                   user=user)
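
Both examples above show the same view in two forks of the project: the handler authenticates the caller, requires system-admin rights, and hands an ordered Queue query to the shared listing helper for rendering. The module-level request object suggests Bottle-style routing; a minimal sketch of how such a handler might be mounted under that assumption (the app object and route path here are hypothetical, not taken from the project):

from bottle import Bottle

app = Bottle()  # stand-in for the project's application object

@app.route('/system/queue')  # hypothetical path
def system_queue_route():
    # delegate to the view above; the auth checks run inside it
    return system_queue()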
Code example #3
def process_queue(blog):
    '''
    Processes the jobs currently in the queue for the selected blog.
    '''
    with db.atomic():

        # acquire this blog's publishing lock; None presumably means
        # another process already holds the queue
        queue_control = publishing_lock(blog, True)

        if queue_control is None:
            return 0

        queue_control.data_string = 'Running'
        queue_control.save()

        queue = Queue.select().order_by(
            Queue.priority.desc(), Queue.date_touched.desc()).where(
                Queue.blog == blog,
                Queue.is_control == False).limit(MAX_BATCH_OPS)

        queue_length = queue.count()

        if queue_length > 0:
            logger.info(
                "Queue job #{} @ {} (blog #{}, {} items) started.".format(
                    queue_control.id, date_format(queue_control.date_touched),
                    queue_control.blog.id, queue_length))

        for q in queue:

            try:
                job_type.action[q.job_type](q)
            except BaseException:
                raise
            else:
                # remove the job only if its action completed without error
                remove_from_queue(q.id)

        queue_control = Queue.get(Queue.blog == blog, Queue.is_control == True)

        queue_control.data_integer -= queue_length

        if queue_control.data_integer <= 0:
            queue_control.delete_instance()
            logger.info("Queue job #{} @ {} (blog #{}) finished.".format(
                queue_control.id, date_format(queue_control.date_touched),
                queue_control.blog.id))
        else:
            queue_control.data_string = 'Paused'
            queue_control.save()
            logger.info(
                "Queue job #{} @ {} (blog #{}) continuing with {} items left.".
                format(queue_control.id,
                       date_format(queue_control.date_touched),
                       queue_control.blog.id, queue_control.data_integer))

    return queue_control.data_integer
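
Since process_queue returns the control job's remaining-item counter, a caller can drain a blog's queue by calling it until that counter reaches zero. A minimal sketch under that reading; drain_queue and the one-second pause are illustrative choices, not part of the project:

from time import sleep

def drain_queue(blog):
    # each call processes up to MAX_BATCH_OPS jobs inside one transaction
    while process_queue(blog) > 0:
        sleep(1)  # hypothetical pause between batches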
Code example #4
File: tasks.py Project: syegulalp/mercury
    scheduled_pages = Page.select().where(
        Page.status == page_status.scheduled,
        Page.publication_date <= datetime.datetime.utcnow()).order_by(
            Page.publication_date.desc())

    total_pages = scheduled_pages.count()

    print('{} pages scheduled'.format(total_pages))

    if total_pages > 0:
        for p in scheduled_pages.select(Page.blog).distinct():
            b = p.blog
            blogs_to_check[b.id] = b

    queue_count = Queue.select(Queue.blog).distinct()

    if queue_count.count() > 0:
        for n in queue_count:
            b = n.blog
            print('Blog {} has existing queue items'.format(b.id))
            blogs_to_check[b.id] = b

    if blogs_to_check:
        print ("Starting run.")
        from core.cms.queue import (queue_page_actions, queue_index_actions,
            queue_ssi_actions)
        from core.models import db
        from core.log import logger
        from time import sleep
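
The scan above collapses the scheduled-page query to one row per blog by re-selecting only the blog column and applying distinct(). A self-contained peewee illustration of that same query pattern, using toy models and an in-memory SQLite database rather than the project's schema:

from peewee import SqliteDatabase, Model, CharField, ForeignKeyField

db = SqliteDatabase(':memory:')

class Blog(Model):
    name = CharField()

    class Meta:
        database = db

class Page(Model):
    blog = ForeignKeyField(Blog)
    title = CharField()

    class Meta:
        database = db

db.create_tables([Blog, Page])
b = Blog.create(name='demo')
Page.create(blog=b, title='one')
Page.create(blog=b, title='two')

pages = Page.select()
# re-select just the blog column: one row per distinct blog,
# no matter how many pages each blog has
for p in pages.select(Page.blog).distinct():
    print(p.blog.id)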
Code example #5
File: queue.py Project: syegulalp/mercury
def process_queue_publish(queue_control, blog):
    '''
    Processes the publishing queue for a given blog.
    Takes a queue_control entry and returns the number of jobs remaining
    in the queue for that blog.
    Typically invoked by the process_queue function.

    :param queue_control:
        The queue_control entry, from the queue, to use for this publishing queue run.
    :param blog:
        The blog object that is in context for this job.
    '''
    # from . import invalidate_cache
    # invalidate_cache()

    # mark the control job as locked (presumably guards against concurrent runs)
    queue_control.lock()

    queue_original = Queue.select().order_by(
        Queue.priority.desc(), Queue.date_touched.desc()).where(
            Queue.blog == blog,
            Queue.is_control == False)

    queue = queue_original.limit(MAX_BATCH_OPS).naive()

    queue_original_length = queue_original.count()
    queue_length = queue.count()

    start_queue = time.monotonic()  # time.clock() is unavailable in Python 3.8+

    if queue_length > 0:
        logger.info("Queue job #{} @ {} (blog #{}, {} items) started.".format(
            queue_control.id,
            date_format(queue_control.date_touched),
            queue_control.blog.id,
            queue_original_length))

    removed_jobs = []

    start = time.monotonic()

    for q in queue:
        job_type.action[q.job_type](q)
        removed_jobs.append(q.id)

        # bail out of the batch once the loop has run past LOOP_TIMEOUT
        if (time.monotonic() - start) > LOOP_TIMEOUT:
            break

    Queue.remove(removed_jobs)

    # we don't need to have an entirely new job!
    # we should recycle the existing one, yes?

    new_queue_control = Queue.control_job(blog)

    # new_queue_control = Queue.get(Queue.blog == blog,
    #                               Queue.is_control == True)

    queue_original_length -= len(removed_jobs)
    new_queue_control.data_integer = queue_original_length

    end_queue = time.monotonic()

    total_time = end_queue - start_queue

    if new_queue_control.data_integer <= 0:
        new_queue_control.delete_instance()
        logger.info("Queue job #{} @ {} (blog #{}) finished ({:.4f} secs).".format(
            new_queue_control.id,
            date_format(new_queue_control.date_touched),
            new_queue_control.blog.id,
            total_time))

    else:
        # new_queue_control.is_running = False
        # new_queue_control.save()
        new_queue_control.unlock()
        logger.info("Queue job #{} @ {} (blog #{}) processed {} items ({:.4f} secs, {} remaining).".format(
            new_queue_control.id,
            date_format(new_queue_control.date_touched),
            new_queue_control.blog.id,
            len(removed_jobs),
            total_time,
            queue_original_length,
            ))

    return new_queue_control.data_integer
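
This version recycles a per-blog control row through Queue.control_job(blog) rather than creating a brand-new job each run, as the comments note. The project defines that method elsewhere; purely as a sketch, a get-or-create along these lines would satisfy the way it is used above (a guess at the shape, not the project's implementation):

@classmethod
def control_job(cls, blog):
    # hypothetical: return the blog's control row, creating one if absent
    try:
        return cls.get(cls.blog == blog, cls.is_control == True)
    except cls.DoesNotExist:
        return cls.create(blog=blog, is_control=True, data_integer=0)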