Example #1
def cleanup_old_jobs():
    # Gather every configured queue plus the failed queue, then delete
    # any job that has reached a final (finished/failed) state.
    queues = [
        django_rq.get_queue(name=queue_name)
        for queue_name in settings.RQ_QUEUE_NAMES
    ]
    queues.append(django_rq.get_failed_queue())
    for queue in queues:
        for job in queue.get_jobs():
            if job.status in status.FINAL_STATUSES:
                job.delete()
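
status.FINAL_STATUSES here is a project-level constant, not part of rq itself; a hypothetical definition built from rq's own job states could look like:

# status.py -- hypothetical module backing `status` above
from rq.job import JobStatus

FINAL_STATUSES = (JobStatus.FINISHED, JobStatus.FAILED)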
Example #2
    def handle(self, **options):
        failed_jobs = list(get_failed_queue().jobs)
        if failed_jobs:
            print('You have tasks in the failed queue:')
            print()
        for job in failed_jobs[:MAX_JOBS]:
            print(job.func_name)
        if len(failed_jobs) > MAX_JOBS:
            print()
            print('... {} more failed jobs'.format(len(failed_jobs) - MAX_JOBS))
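
A handle() like this is the entry point of a Django management command; a minimal wrapper, with a hypothetical MAX_JOBS cap and the get_failed_queue export that older django-rq releases provided, might look like:

import django_rq
from django.core.management.base import BaseCommand

get_failed_queue = django_rq.get_failed_queue
MAX_JOBS = 10  # hypothetical display cap


class Command(BaseCommand):
    help = 'List the first few jobs sitting in the failed queue.'

    # the handle() shown above goes here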
Example #3
    def _handle_failed_jobs(self):
        failed_queue = django_rq.get_failed_queue()
        for rq_job_id in failed_queue.job_ids:
            try:
                conversion_job = conversion_models.Job.objects.get(
                    rq_job_id=rq_job_id)
            except ObjectDoesNotExist as e:
                logger.exception(e)
                continue
            self._set_failed_unless_final(conversion_job, rq_job_id=rq_job_id)
            self._notify(conversion_job)
Example #4
    def queryset(self, request, queryset):
        # Not optimal, but readable: the values below could be
        # computed inside the specific if-branches further down,
        # but that would be cumbersome.

        # UGLY hack to get the current job
        # https://github.com/nvie/rq/pull/269
        in_progress_ids = []
        redis_conn = django_rq.get_connection()
        for k in redis_conn.keys():
            try:
                data = unpickle(redis_conn.hget(k, 'data'))
                status = redis_conn.hget(k, 'status')
                if data[0] == 'archives.admin.encode' and status == 'started':
                    in_progress_ids = [data[2][0], ]
                    break
            except Exception:
                # Not every Redis key is a job hash; skip the rest.
                pass

        queue = django_rq.get_queue('default')
        in_queue_ids = [job.args[0] for job in queue.jobs
                        if job.func_name == 'archives.admin.encode']
        failed_queue = django_rq.get_failed_queue('default')
        failed_ids = [job.args[0] for job in failed_queue.jobs
                      if job.func_name == 'archives.admin.encode']

        if self.value() == 'no_file':
            # We can't use file__isnull here because FileField is
            # stored internally as a CharField, and Django stores
            # "no file" as an empty string '' in the database.
            return queryset.filter(file="")

        if self.value() == 'in_queue':
            return queryset.filter(id__in=in_queue_ids)

        if self.value() == 'in_progress':
            return queryset.filter(id__in=in_progress_ids)

        if self.value() == 'failed':
            return queryset.filter(id__in=failed_ids)

        if self.value() == 'encoded':
            encoded = [media.id for media in queryset if media.is_encoded]
            return (queryset.exclude(file="")
                            .exclude(id__in=in_queue_ids)
                            .exclude(id__in=in_progress_ids)
                            .exclude(id__in=failed_ids)
                            .filter(id__in=encoded))

        if self.value() == 'not_encoded':
            not_encoded = [media.id for media in queryset if not media.is_encoded]
            return (queryset.exclude(file="")
                            .exclude(id__in=in_queue_ids)
                            .exclude(id__in=in_progress_ids)
                            .exclude(id__in=failed_ids)
                            .filter(id__in=not_encoded))
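
The key-scanning hack above predates rq's job registries; on a newer rq, the in-progress ids could instead come from StartedJobRegistry. A sketch, keeping the assumed 'archives.admin.encode' job signature:

import django_rq
from rq.job import Job
from rq.registry import StartedJobRegistry


def started_encode_ids():
    # Ids of media whose 'encode' job is currently running.
    conn = django_rq.get_connection()
    registry = StartedJobRegistry('default', connection=conn)
    ids = []
    for job_id in registry.get_job_ids():
        job = Job.fetch(job_id, connection=conn)
        if job.func_name == 'archives.admin.encode':
            ids.append(job.args[0])
    return ids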
Example #5
    def _failed_copies(self):
        """Return the ids of failed copies."""
        failed_copies = []
        failed_queue = django_rq.get_failed_queue('archive')
        for job in failed_queue.jobs:
            # HACK/FIXME: django-rq should return only the 'archive'
            # failed queue, but it returns failed jobs from all queues!
            if job.func_name == 'archives.admin.archive':
                failed_copies.append(job.args[0])
        return failed_copies
Example #6
def exception_handler(job, *exc_info):
    try:
        job_status = JobStatus.objects.get(job_id=job.get_id())
        job_status.status = STATUS_FAILED
        job_status.save(update_fields=['status'])
    except Exception:
        # Don't let bookkeeping failures mask the original error.
        pass

    fq = get_failed_queue()
    exc_string = ''.join(traceback.format_exception(*exc_info))
    fq.quarantine(job, exc_info=exc_string)
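
django-rq can install a handler like this on its workers through settings; a minimal sketch (the dotted path is hypothetical):

# settings.py
RQ_EXCEPTION_HANDLERS = ['myapp.rq_handlers.exception_handler']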
Example #7
def delete_all_tasks_from_queue(queue_name):
    if queue_name == "failed":
        q = django_rq.get_failed_queue()
    elif queue_name == "parser":
        q = django_rq.get_queue("parser")
    else:
        q = django_rq.get_queue("default")

    # Drain the queue: dequeue() pops jobs until none remain.
    while True:
        current_job = q.dequeue()
        if not current_job:
            break
        current_job.delete()
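
rq's Queue.empty() drops every pending job in a single call (Example #12 below relies on it), so the drain loop can be collapsed; a sketch of the shorter form:

import django_rq


def delete_all_tasks_from_queue(queue_name):
    if queue_name == "failed":
        q = django_rq.get_failed_queue()
    elif queue_name == "parser":
        q = django_rq.get_queue("parser")
    else:
        q = django_rq.get_queue("default")
    q.empty()  # removes all pending jobs at once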
Example #8
def delete_all_tasks_from_queue(queue_name):
    if queue_name == "failed":
        q = django_rq.get_failed_queue()
    elif queue_name == "parser":
        q = django_rq.get_queue("parser")
    else:
        q = django_rq.get_queue("default")

    while True:
        job = q.dequeue()
        if not job:
            break
        job.delete()
Example #9
def get_failed_tasks(request):
    import django_rq
    import json

    queue = django_rq.get_failed_queue()

    jobdata = list()
    for job in queue.jobs:
        job_dict = {
            'job_id': job.id,
            'func_name': job.func_name,
            'error_message': job.exc_info,
            'ended_at': job.ended_at.strftime("%a, %d %b %Y %H:%M:%S +0000"),
            'enqueued_at': job.enqueued_at.strftime("%a, %d %b %Y %H:%M:%S +0000"),
        }
        jobdata.append(job_dict)

    data = json.dumps(jobdata)
    return HttpResponse(data, content_type='application/json')
Example #10
    def launch_encodings(self, request, queryset):
        # Exclude media without a file: enqueuing those would send
        # the job straight to the failed queue.
        failed_queue = django_rq.get_failed_queue()
        queue = django_rq.get_queue('default')
        for media in queryset.exclude(file=''):
            # We shouldn't try to encode media that are already
            # in the queue or in progress.

            # Remove any previous attempt from the failed queue.
            for job in failed_queue.jobs:
                if job.args[0] == media.id:
                    failed_queue.remove(job)
            queue.enqueue(call_command, args=('encode', media.id), timeout=86400)
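
If the goal is to retry the failed job itself rather than enqueue a fresh one, pre-1.0 rq's FailedQueue also offered requeue(), which moves a job back onto its origin queue; a hedged sketch:

import django_rq


def requeue_failed_encoding(media_id):
    # Retry a failed encode by requeueing the original job (pre-1.0 rq API).
    failed_queue = django_rq.get_failed_queue()
    for job in failed_queue.jobs:
        if job.args[0] == media_id:
            failed_queue.requeue(job.id)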
Example #11
def get_failed_tasks(request):
    queue = django_rq.get_failed_queue()
    jobdata = list()
    for job in queue.jobs:
        job_dict = {
            'job_id': job.id,
            'func_name': job.description,
            'error_message': job.exc_info,
            'ended_at': job.ended_at.strftime("%a, %d %b %Y %H:%M:%S +0000"),
            'enqueued_at': job.enqueued_at.strftime("%a, %d %b %Y %H:%M:%S +0000"),
            'args': job.args
        }

        jobdata.append(job_dict)

    data = json.dumps(jobdata)
    return HttpResponse(data, content_type='application/json')
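
Since Django 1.7, JsonResponse can replace the manual json.dumps/HttpResponse pair; a condensed sketch of the same view:

import django_rq
from django.http import JsonResponse


def get_failed_tasks(request):
    queue = django_rq.get_failed_queue()
    jobdata = [{
        'job_id': job.id,
        'func_name': job.func_name,
        'error_message': job.exc_info,
    } for job in queue.jobs]
    return JsonResponse(jobdata, safe=False)  # safe=False permits a top-level list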
Example #12
    def setUp(self):
        for queue_config in settings.RQ_QUEUES.values():
            queue_config['ASYNC'] = True

        haystack.connections.reload('default')
        # Initialize django_rq: start each queue empty.
        q = django_rq.get_failed_queue()
        q.empty()
        q_archive = django_rq.get_queue('archive')
        q_archive.empty()
        q_default = django_rq.get_queue('default')
        q_default.empty()
        # Must reassign the archives_settings constants here because
        # overriding archives_settings is not possible.
        archives_settings.STREAM_EXT_AUDIO_ROOT = os.path.join(settings.STREAM_ROOT, 'ext', 'audio')
        archives_settings.STREAM_EXT_VIDEO_ROOT = os.path.join(settings.STREAM_ROOT, 'ext', 'video')
        archives_settings.STREAM_INT_AUDIO_ROOT = os.path.join(settings.STREAM_ROOT, 'int', 'audio')
        archives_settings.STREAM_INT_VIDEO_ROOT = os.path.join(settings.STREAM_ROOT, 'int', 'video')

        super(BaseTestCase, self).setUp()
Example #13
    def _encoding_state(self):
        # No file associated with the media
        if not self.file:
            return ENCODING_NO_FILE

        # The file is currently being processed
        redis_conn = django_rq.get_connection()
        for k in redis_conn.keys():
            try:
                data = unpickle(redis_conn.hget(k, 'data'))
                status = redis_conn.hget(k, 'status')
                if data[0] == 'archives.admin.encode' and status == 'started' and self.id == data[2][0]:
                    return ENCODING_IN_PROGRESS
            except Exception:
                # Not every Redis key is a job hash; ignore the rest.
                pass

        # The file is currently in queue for encoding process
        queue = django_rq.get_queue('default')
        for index, job in enumerate(queue.jobs):
            if job.func_name == 'archives.admin.encode':
                if job.args[0] == self.id:
                    return ENCODING_IN_QUEUE, index

        # If not, the encoding process should have failed
        failed_queue = django_rq.get_failed_queue('default')
        for job in failed_queue.jobs:
            if job.func_name == 'archives.admin.encode':
                if job.args[0] == self.id:
                    return ENCODING_FAILED

        # Otherwise, there is no job for this media, which leaves
        # two cases: the encoded files are available and the media
        # is encoded, or the files are not available. We already
        # ruled out an in-progress or failed encoding above, so in
        # the latter case the file is simply 'not encoded'.
        if self.file:
            # Test if files in stream repository exist
            if self.is_encoded:
                return ENCODING_ENCODED
            else:
                return ENCODING_NOT_ENCODED
Example #14
    def tearDown(self):
        q = get_failed_queue()
        q.empty()
Example #15
def clear_failed():
    queue = django_rq.get_failed_queue()
    return queue.empty()
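
get_failed_queue() itself was removed in RQ 1.0, where failed jobs moved into a per-queue FailedJobRegistry; a hedged sketch of clear_failed() against the newer API:

import django_rq
from rq.job import Job
from rq.registry import FailedJobRegistry


def clear_failed_modern():
    # RQ >= 1.0: failed jobs live in a registry, not a queue.
    queue = django_rq.get_queue('default')
    registry = FailedJobRegistry(queue=queue)
    for job_id in registry.get_job_ids():
        job = Job.fetch(job_id, connection=queue.connection)
        registry.remove(job, delete_job=True)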
Example #16
import logging
from rq import Queue
from rq.job import Job
from collections import deque
from rq import get_current_job
from django.conf import settings
import django_rq
from rq.registry import FinishedJobRegistry, StartedJobRegistry

logger = logging.getLogger('uprilogger')

q = django_rq.get_queue()
finished_job_registry = FinishedJobRegistry(connection=django_rq.get_connection())
started_job_registry = StartedJobRegistry(connection=django_rq.get_connection())
failed_queue = django_rq.get_failed_queue()


#
# Job management
#


def job_message(message):
    # Attach a progress message to the currently running job's meta.
    job = get_current_job(connection=django_rq.get_connection())
    if not job.meta.get('messages'):
        job.meta['messages'] = deque()
    job.meta['messages'].append(message)
    job.save()  # persists job.meta back to Redis (pre-1.0 rq behavior)
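
A consumer can drain those messages again on the other side; a sketch (pop_job_messages is hypothetical, and job.refresh() re-reads the job, including meta, from Redis):

def pop_job_messages(job):
    # Hypothetical reader for the messages appended by job_message() above.
    job.refresh()  # reload job data, including meta, from Redis
    messages = list(job.meta.get('messages', []))
    job.meta['messages'] = deque()
    job.save()
    return messages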

Example #17
import logging
from rq import Queue
from redis import Redis
from collections import deque
from rq import get_current_job
from django.conf import settings
import django_rq
from rq.registry import FinishedJobRegistry, StartedJobRegistry

logger = logging.getLogger('uprilogger')

q = django_rq.get_queue()
finished_job_registry = FinishedJobRegistry(connection=django_rq.get_connection())
started_job_registry = StartedJobRegistry(connection=django_rq.get_connection())
failed_queue = django_rq.get_failed_queue()


#
# Job management
#


def job_message(message):
    job = get_current_job(connection=django_rq.get_connection())
    if not job.meta.get('messages'):
        job.meta['messages'] = deque()
    job.meta['messages'].append(message)
    job.save()


def queue_job(job, args):