Example #1
def process_queue(blog):
    '''
    Processes the jobs currently in the queue for the selected blog.
    '''
    with db.atomic():

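        # Take the publishing lock for this blog; publishing_lock returns
        # None when the lock is unavailable (e.g. another worker owns it).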
        queue_control = publishing_lock(blog, True)

        if queue_control is None:
            return 0

        queue_control.data_string = 'Running'
        queue_control.save()

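        # Pull up to MAX_BATCH_OPS pending jobs, highest priority first,
        # excluding the control entry itself.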
        queue = Queue.select().order_by(
            Queue.priority.desc(), Queue.date_touched.desc()).where(
                Queue.blog == blog,
                Queue.is_control == False).limit(MAX_BATCH_OPS)

        queue_length = queue.count()

        if queue_length > 0:
            logger.info(
                "Queue job #{} @ {} (blog #{}, {} items) started.".format(
                    queue_control.id, date_format(queue_control.date_touched),
                    queue_control.blog.id, queue_length))

        for q in queue:
            # Run the job's handler; an exception propagates and rolls back
            # the enclosing transaction, so failed jobs stay in the queue.
            job_type.action[q.job_type](q)
            # A job is only removed from the queue once its handler succeeds.
            remove_from_queue(q.id)

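        # Re-fetch the control job and decrement its outstanding-item count.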
        queue_control = Queue.get(Queue.blog == blog, Queue.is_control == True)

        queue_control.data_integer -= queue_length

        if queue_control.data_integer <= 0:
            queue_control.delete_instance()
            logger.info("Queue job #{} @ {} (blog #{}) finished.".format(
                queue_control.id, date_format(queue_control.date_touched),
                queue_control.blog.id))
        else:
            queue_control.data_string = 'Paused'
            queue_control.save()
            # Log the remaining count, not the size of the batch just run.
            logger.info(
                "Queue job #{} @ {} (blog #{}) continuing with {} items left.".format(
                    queue_control.id, date_format(queue_control.date_touched),
                    queue_control.blog.id, queue_control.data_integer))

    return queue_control.data_integer
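
A minimal driver sketch (an assumption about the surrounding scheduler, not code from the source): process_queue returns the number of jobs still outstanding for the blog, and 0 when the publishing lock cannot be taken, so a caller can simply loop until it reports zero.

def drain_queue(blog):
    # Hypothetical scheduler hook: process one batch per call until
    # process_queue reports no jobs left (it also returns 0 when the
    # publishing lock is held by another worker).
    while process_queue(blog) > 0:
        pass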
Example #2
    def save(self, user, current_revision, is_backup=False, change_note=None):

        from core.log import logger
        from core.error import PageNotChanged

        max_revisions = self.blog.max_revisions

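        # Stored revisions for this page, newest first, capped at the
        # blog's retention limit.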
        previous_revisions = (self.select().where(
            PageRevision.page_id == self.page_id).order_by(
                PageRevision.modified_date.desc()).limit(max_revisions))

        if previous_revisions.count() > 0:

            last_revision = previous_revisions[0]

            page_changed = False

            # Compare every content field against the most recent revision;
            # bookkeeping fields are excluded from the comparison.
            for name in last_revision._meta.fields:
                if name not in ("modified_date", "id", "page_id", "is_backup",
                                "change_note", "saved_by"):
                    current_value = getattr(current_revision, name)
                    previous_value = getattr(last_revision, name)

                    if current_value != previous_value:
                        page_changed = True
                        break

            if not page_changed:
                raise PageNotChanged(
                    'Page {} was saved without changes.'.format(
                        current_revision.for_log))

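        # At the retention limit: delete anything older than the oldest
        # revision being kept.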
        if previous_revisions.count() >= max_revisions:

            older_revisions = DeleteQuery(PageRevision).where(
                PageRevision.page_id == self.page_id,
                PageRevision.modified_date <
                previous_revisions[max_revisions - 1].modified_date)

            older_revisions.execute()

        self.is_backup = is_backup
        self.change_note = change_note
        self.saved_by = user.id

        results = Model.save(self)

        logger.info("Revision {} for page {} created.".format(
            date_format(self.modified_date), self.for_log))

        return results
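
A hedged sketch of a call site; revision_from_page is an assumed helper, not part of the source, and PageNotChanged is presumably caught by callers that treat an unchanged save as a no-op.

from core.error import PageNotChanged

def save_revision(page, user):
    # Hypothetical call site; revision_from_page is an assumed helper that
    # builds an unsaved PageRevision from the page's current contents.
    new_revision = revision_from_page(page)
    try:
        new_revision.save(user, current_revision=new_revision,
                          change_note='Edited via dashboard')
    except PageNotChanged:
        pass  # identical to the latest stored revision; nothing to write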
Example #3
          'format': lambda x: x.priority
          },
         {'field': 'job_type',
          'label': 'Job type',
          'colwidth': '10%',
          'colclass': 'overflow',
          'format': lambda x: job_type.description[x.job_type]
          },
         {'field': 'data_string',
          'label': 'Description',
          'colclass': 'overflow',
          'format_raw': lambda x: x.data_string
          },
         {'field': 'date',
          'label': 'Date inserted',
          'format_raw': lambda x: date_format(x.date_touched)
          }
         ]},
 'categories': {
     'none': 'No categories found',
     'colset': [
         {'field': 'title',
          'label': 'Category',
          'format_raw': lambda x: x.for_listing
          },
         {'field': 'child_of',
          'label': 'Parent',
          'format_raw': lambda x: x.parent_c.for_listing
          },
         {'field': 'pages',
          'label': 'Pages in category',
Example #4
def process_queue_publish(queue_control, blog):
    '''
    Processes the publishing queue for a given blog.
    Takes a queue_control entry and returns the number of jobs
    remaining in the queue for that blog.
    Typically invoked by the process_queue function.

    :param queue_control:
        The queue_control entry, from the queue, to use for this publishing queue run.
    :param blog:
        The blog object that is in context for this job.
    '''
    # from . import invalidate_cache
    # invalidate_cache()

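    # Lock the control job so a concurrent run doesn't process this queue.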
    queue_control.lock()

    queue_original = Queue.select().order_by(Queue.priority.desc(),
                                             Queue.date_touched.desc()).where(
                                                 Queue.blog == blog,
                                                 Queue.is_control == False)

    queue = queue_original.limit(MAX_BATCH_OPS).naive()

    queue_original_length = queue_original.count()
    queue_length = queue.count()

    # time.clock() was removed in Python 3.8; time.monotonic() is the
    # modern equivalent for measuring elapsed time.
    start_queue = time.monotonic()

    if queue_length > 0:
        logger.info("Queue job #{} @ {} (blog #{}, {} items) started.".format(
            queue_control.id, date_format(queue_control.date_touched),
            queue_control.blog.id, queue_original_length))

    removed_jobs = []

    start = time.monotonic()

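    # Run each job's handler and record it for removal; stop early once the
    # batch has been running longer than LOOP_TIMEOUT.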
    for q in queue:
        job_type.action[q.job_type](q)
        removed_jobs.append(q.id)

        if (time.monotonic() - start) > LOOP_TIMEOUT:
            break

    Queue.remove(removed_jobs)

    # we don't need to have an entirely new job!
    # we should recycle the existing one, yes?

    new_queue_control = Queue.control_job(blog)

    # new_queue_control = Queue.get(Queue.blog == blog,
    # Queue.is_control == True)

    queue_original_length -= len(removed_jobs)
    new_queue_control.data_integer = queue_original_length

    end_queue = time.monotonic()

    total_time = end_queue - start_queue

    if new_queue_control.data_integer <= 0:
        new_queue_control.delete_instance()
        logger.info(
            "Queue job #{} @ {} (blog #{}) finished ({:.4f} secs).".format(
                new_queue_control.id,
                date_format(new_queue_control.date_touched),
                new_queue_control.blog.id, total_time))

    else:
        # new_queue_control.is_running = False
        # new_queue_control.save()
        new_queue_control.unlock()
        logger.info(
            "Queue job #{} @ {} (blog #{}) processed {} items ({:.4f} secs, {} remaining)."
            .format(
                new_queue_control.id,
                date_format(new_queue_control.date_touched),
                new_queue_control.blog.id,
                len(removed_jobs),
                total_time,
                queue_original_length,
            ))

    return new_queue_control.data_integer
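
A sketch of how a worker might drive this function; Queue.control_job(blog) is used above to fetch or recycle the control entry, so a loop can keep requesting batches until nothing remains (an assumption about the surrounding scheduler, not code from the source).

def publish_all(blog):
    # Hypothetical worker loop: one MAX_BATCH_OPS batch per pass, reusing
    # Queue.control_job to fetch or recreate the control entry each time.
    remaining = process_queue_publish(Queue.control_job(blog), blog)
    while remaining > 0:
        remaining = process_queue_publish(Queue.control_job(blog), blog)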
Example #5
         },
         {'field': 'in_pages',
          'label': 'Pages',
          'format_raw': lambda x: x.in_pages.count()
          }
     ]
 },
 'system_log': {
     'none': 'No log entries found',
     'colset': [
         {'field': 'date',
          'label': 'Timestamp',
          'xlabel_style': 'width:1%',
          'colclass': 'overflow',
          'colwidth': '1%',
          'format': lambda x: date_format(x.date)
          },
         {'field': 'message',
          'label': 'Log entry',
          'colclass': 'xoverflow',
          'colwidth': '*',
          'format': lambda x: x.message
          }
     ]
 },
 'all_sites': {
     'none': 'No pages found',
     'actions': (),
     'colset': (
         {'field': 'name',