Example #1
class TestFinishedJobRegistry(RQTestCase):

    def setUp(self):
        super(TestFinishedJobRegistry, self).setUp()
        self.registry = FinishedJobRegistry(connection=self.testconn)

    def test_cleanup(self):
        """Finished job registry removes expired jobs."""
        timestamp = current_timestamp()
        self.testconn.zadd(self.registry.key, {'foo': 1})
        self.testconn.zadd(self.registry.key, {'bar': timestamp + 10})
        self.testconn.zadd(self.registry.key, {'baz': timestamp + 30})

        self.registry.cleanup()
        self.assertEqual(self.registry.get_job_ids(), ['bar', 'baz'])

        self.registry.cleanup(timestamp + 20)
        self.assertEqual(self.registry.get_job_ids(), ['baz'])

    def test_jobs_are_put_in_registry(self):
        """Completed jobs are added to FinishedJobRegistry."""
        self.assertEqual(self.registry.get_job_ids(), [])
        queue = Queue(connection=self.testconn)
        worker = Worker([queue])

        # Completed jobs are put in FinishedJobRegistry
        job = queue.enqueue(say_hello)
        worker.perform_job(job, queue)
        self.assertEqual(self.registry.get_job_ids(), [job.id])

        # Failed jobs are not put in FinishedJobRegistry
        failed_job = queue.enqueue(div_by_zero)
        worker.perform_job(failed_job, queue)
        self.assertEqual(self.registry.get_job_ids(), [job.id])
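The cleanup test above leans on the registry being a Redis sorted set whose scores are expiry timestamps. A minimal standalone sketch of the same mechanics (an illustration only, assuming a local Redis, redis-py >= 3.0 and a current rq):

from redis import Redis
from rq.registry import FinishedJobRegistry
from rq.utils import current_timestamp

conn = Redis()
registry = FinishedJobRegistry('default', connection=conn)

now = current_timestamp()
# Scores are expiry timestamps: 'old' is already expired, 'new' is not.
conn.zadd(registry.key, {'old': now - 10, 'new': now + 60})

registry.cleanup()             # drops every entry scored at or below "now"
print(registry.get_job_ids())  # ['new']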
Example #2
    def test_finished_jobs(self):
        """Ensure that finished jobs page works properly."""
        queue = get_queue('django_rq_test')
        queue_index = get_queue_index('django_rq_test')

        job = queue.enqueue(access_self)
        registry = FinishedJobRegistry(queue.name, queue.connection)
        registry.add(job, 2)
        response = self.client.get(
            reverse('rq_finished_jobs', args=[queue_index])
        )
        self.assertEqual(response.context['jobs'], [job])
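A note on registry.add(job, 2) above: as we read rq's BaseRegistry, the second argument is a TTL added to the current timestamp to form the sorted-set score, so the entry lapses roughly two seconds later; by the same reading, a negative TTL stores the entry without expiry. A sketch against the same fixtures:

registry.add(job, 2)   # scored current_timestamp() + 2; expires from the registry in ~2s
registry.add(job, -1)  # negative ttl: scored below zero, never removed by cleanup()
print(registry.get_expired_job_ids())  # ids whose expiry score has already passed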
Example #3
File: test_registry.py Project: nvie/rq
class TestFinishedJobRegistry(RQTestCase):

    def setUp(self):
        super(TestFinishedJobRegistry, self).setUp()
        self.registry = FinishedJobRegistry(connection=self.testconn)

    def test_key(self):
        self.assertEqual(self.registry.key, 'rq:finished:default')

    def test_cleanup(self):
        """Finished job registry removes expired jobs."""
        timestamp = current_timestamp()
        self.testconn.zadd(self.registry.key, {'foo': 1})
        self.testconn.zadd(self.registry.key, {'bar': timestamp + 10})
        self.testconn.zadd(self.registry.key, {'baz': timestamp + 30})

        self.registry.cleanup()
        self.assertEqual(self.registry.get_job_ids(), ['bar', 'baz'])

        self.registry.cleanup(timestamp + 20)
        self.assertEqual(self.registry.get_job_ids(), ['baz'])

    def test_jobs_are_put_in_registry(self):
        """Completed jobs are added to FinishedJobRegistry."""
        self.assertEqual(self.registry.get_job_ids(), [])
        queue = Queue(connection=self.testconn)
        worker = Worker([queue])

        # Completed jobs are put in FinishedJobRegistry
        job = queue.enqueue(say_hello)
        worker.perform_job(job, queue)
        self.assertEqual(self.registry.get_job_ids(), [job.id])

        # When job is deleted, it should be removed from FinishedJobRegistry
        self.assertEqual(job.get_status(), JobStatus.FINISHED)
        job.delete()
        self.assertEqual(self.registry.get_job_ids(), [])

        # Failed jobs are not put in FinishedJobRegistry
        failed_job = queue.enqueue(div_by_zero)
        worker.perform_job(failed_job, queue)
        self.assertEqual(self.registry.get_job_ids(), [])
Example #4
 def test_getting_registries(self):
     """Getting job registries from queue object"""
     queue = Queue('example')
     self.assertEqual(queue.scheduled_job_registry,
                      ScheduledJobRegistry(queue=queue))
     self.assertEqual(queue.started_job_registry,
                      StartedJobRegistry(queue=queue))
     self.assertEqual(queue.failed_job_registry,
                      FailedJobRegistry(queue=queue))
     self.assertEqual(queue.deferred_job_registry,
                      DeferredJobRegistry(queue=queue))
     self.assertEqual(queue.finished_job_registry,
                      FinishedJobRegistry(queue=queue))
Example #5
def get_all_jobs(connection=None, queue_name=DEFAULT_QUEUE_NAME):
    from redis import Redis
    from rq import Queue
    from rq.job import Job
    from rq.registry import FinishedJobRegistry, FailedJobRegistry

    # Resolve the connection once so Job.fetch_many below gets a real client too
    connection = connection or Redis()
    queue = Queue(queue_name, connection=connection)
    queued_jobs = queue.job_ids
    finished_jobs = FinishedJobRegistry(queue=queue).get_job_ids()
    failed_jobs = FailedJobRegistry(queue=queue).get_job_ids()
    return Job.fetch_many(
        queued_jobs + finished_jobs + failed_jobs, connection=connection
    )
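A hedged usage sketch for the helper above (DEFAULT_QUEUE_NAME assumed to resolve to 'default'):

from redis import Redis

jobs = get_all_jobs(connection=Redis(), queue_name='default')
for job in jobs:
    if job is not None:  # Job.fetch_many yields None for ids whose job data expired
        print(job.id, job.get_status())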
Example #6
    def test_work_with_ssl(self):
        connection = find_empty_redis_database(ssl=True)
        queue = Queue(connection=connection)
        worker = Worker(queues=[queue], connection=connection)
        p = Process(target=kill_worker, args=(os.getpid(), False, 5))

        p.start()
        queue.enqueue_at(datetime(2019, 1, 1, tzinfo=timezone.utc), say_hello)
        worker.work(burst=False, with_scheduler=True)
        p.join(1)
        self.assertIsNotNone(worker.scheduler)
        registry = FinishedJobRegistry(queue=queue)
        self.assertEqual(len(registry), 1)
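What this test exercises, roughly: enqueue_at() parks the job in the queue's ScheduledJobRegistry rather than on the queue itself, and work(with_scheduler=True) starts a scheduler that moves due jobs onto the queue. A sketch using the same fixtures (queue and say_hello as above):

from datetime import datetime, timezone
from rq.registry import ScheduledJobRegistry

job = queue.enqueue_at(datetime(2019, 1, 1, tzinfo=timezone.utc), say_hello)
print(job in ScheduledJobRegistry(queue=queue))  # True until a scheduler moves it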
Example #7
def connect_redis():
    """
    Connect to Redis and store the connection in the application
    context.
    """

    g.redis = Redis(host=app.config['REDIS_HOST'],
                    port=app.config['REDIS_PORT'],
                    db=app.config['REDIS_DB'])

    g.queue = Queue(connection=g.redis)
    g.started_registry = StartedJobRegistry(connection=g.redis)
    g.finished_registry = FinishedJobRegistry(connection=g.redis)
Example #8
    def test_job_delete_removes_itself_from_registries(self):
        """job.delete() should remove itself from job registries"""
        connection = self.testconn
        job = Job.create(func=fixtures.say_hello, status=JobStatus.FAILED,
                         connection=self.testconn, origin='default')
        job.save()
        registry = FailedJobRegistry(connection=self.testconn)
        registry.add(job, 500)

        job.delete()
        self.assertFalse(job in registry)

        job = Job.create(func=fixtures.say_hello, status=JobStatus.FINISHED,
                         connection=self.testconn, origin='default')
        job.save()

        registry = FinishedJobRegistry(connection=self.testconn)
        registry.add(job, 500)

        job.delete()
        self.assertFalse(job in registry)

        job = Job.create(func=fixtures.say_hello, status=JobStatus.STARTED,
                         connection=self.testconn, origin='default')
        job.save()

        registry = StartedJobRegistry(connection=self.testconn)
        registry.add(job, 500)

        job.delete()
        self.assertFalse(job in registry)

        job = Job.create(func=fixtures.say_hello, status=JobStatus.DEFERRED,
                         connection=self.testconn, origin='default')
        job.save()

        registry = DeferredJobRegistry(connection=self.testconn)
        registry.add(job, 500)

        job.delete()
        self.assertFalse(job in registry)

        job = Job.create(func=fixtures.say_hello, status=JobStatus.SCHEDULED,
                         connection=self.testconn, origin='default')
        job.save()

        registry = ScheduledJobRegistry(connection=self.testconn)
        registry.add(job, 500)

        job.delete()
        self.assertFalse(job in registry)
Example #9
File: views.py Project: addys888/metaci
def info(request, token):
    """
    Return the HireFire json data needed to scale worker dynos
    """
    if not settings.HIREFIRE_TOKEN:
        return HttpResponseBadRequest(
            "Hirefire not configured. Set the HIREFIRE_TOKEN environment variable on the app to use Hirefire for dyno scaling."
        )
    if token != settings.HIREFIRE_TOKEN:
        raise PermissionDenied("Invalid token")

    current_tasks = 0

    queues = []
    for index, config in enumerate(QUEUES_LIST):

        queue = get_queue_by_index(index)
        connection = queue.connection

        # Only look at the default queue
        if queue.name != "default":
            continue

        queue_data = {
            "name": queue.name,
            "jobs": queue.count,
            "index": index,
            "connection_kwargs": connection.connection_pool.connection_kwargs,
        }

        connection = get_connection(queue.name)
        all_workers = Worker.all(connection=connection)
        queue_workers = [worker for worker in all_workers if queue in worker.queues]
        queue_data["workers"] = len(queue_workers)

        finished_job_registry = FinishedJobRegistry(queue.name, connection)
        started_job_registry = StartedJobRegistry(queue.name, connection)
        deferred_job_registry = DeferredJobRegistry(queue.name, connection)
        queue_data["finished_jobs"] = len(finished_job_registry)
        queue_data["started_jobs"] = len(started_job_registry)
        queue_data["deferred_jobs"] = len(deferred_job_registry)

        current_tasks += queue_data["jobs"]
        current_tasks += queue_data["started_jobs"]

        queues.append(queue_data)

    payload = [{"quantity": current_tasks, "name": "worker"}]

    payload = json.dumps(payload)
    return HttpResponse(payload, content_type="application/json")
Example #10
def serialize_queues(instance_number, queues):
    return [
        dict(
            name=q.name,
            count=q.count,
            queued_url=url_for(
                ".jobs_overview",
                instance_number=instance_number,
                queue_name=q.name,
                registry_name="queued",
                per_page="8",
                page="1",
            ),
            failed_job_registry_count=FailedJobRegistry(q.name).count,
            failed_url=url_for(
                ".jobs_overview",
                instance_number=instance_number,
                queue_name=q.name,
                registry_name="failed",
                per_page="8",
                page="1",
            ),
            started_job_registry_count=StartedJobRegistry(q.name).count,
            started_url=url_for(
                ".jobs_overview",
                instance_number=instance_number,
                queue_name=q.name,
                registry_name="started",
                per_page="8",
                page="1",
            ),
            deferred_job_registry_count=DeferredJobRegistry(q.name).count,
            deferred_url=url_for(
                ".jobs_overview",
                instance_number=instance_number,
                queue_name=q.name,
                registry_name="deferred",
                per_page="8",
                page="1",
            ),
            finished_job_registry_count=FinishedJobRegistry(q.name).count,
            finished_url=url_for(
                ".jobs_overview",
                instance_number=instance_number,
                queue_name=q.name,
                registry_name="finished",
                per_page="8",
                page="1",
            ),
        ) for q in queues
    ]
Example #11
def get_statistics(run_maintenance_tasks=False):
    queues = []
    for index, config in enumerate(QUEUES_LIST):

        queue = get_queue_by_index(index)
        connection = queue.connection
        connection_kwargs = connection.connection_pool.connection_kwargs

        if run_maintenance_tasks:
            clean_registries(queue)
            clean_worker_registry(queue)

        # Raw access to the leftmost item of the redis list.
        # This might not be accurate, since new jobs can be added to the left
        # with the `at_front` parameter.
        # Ideally rq should support Queue.oldest_job
        last_job_id = connection.lindex(queue.key, 0)
        last_job = queue.fetch_job(
            last_job_id.decode('utf-8')) if last_job_id else None
        if last_job:
            oldest_job_timestamp = to_localtime(last_job.enqueued_at)\
                .strftime('%Y-%m-%d, %H:%M:%S')
        else:
            oldest_job_timestamp = "-"

        # parser_class and connection_pool are not needed and not JSON serializable
        connection_kwargs.pop('parser_class', None)
        connection_kwargs.pop('connection_pool', None)

        queue_data = {
            'name': queue.name,
            'jobs': queue.count,
            'oldest_job_timestamp': oldest_job_timestamp,
            'index': index,
            'connection_kwargs': connection_kwargs
        }

        connection = get_connection(queue.name)
        queue_data['workers'] = Worker.count(queue=queue)

        finished_job_registry = FinishedJobRegistry(queue.name, connection)
        started_job_registry = StartedJobRegistry(queue.name, connection)
        deferred_job_registry = DeferredJobRegistry(queue.name, connection)
        failed_job_registry = FailedJobRegistry(queue.name, connection)
        queue_data['finished_jobs'] = len(finished_job_registry)
        queue_data['started_jobs'] = len(started_job_registry)
        queue_data['deferred_jobs'] = len(deferred_job_registry)
        queue_data['failed_jobs'] = len(failed_job_registry)

        queues.append(queue_data)
    return {'queues': queues}
Example #12
def finished_jobs(request, queue_index):
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)

    registry = FinishedJobRegistry(queue.name, queue.connection)

    items_per_page = 100
    num_jobs = len(registry)
    page = int(request.GET.get('page', 1))
    jobs = []

    if num_jobs > 0:
        last_page = int(ceil(num_jobs / items_per_page))
        page_range = range(1, last_page + 1)
        offset = items_per_page * (page - 1)
        job_ids = registry.get_job_ids(offset, offset + items_per_page - 1)

        for job_id in job_ids:
            try:
                jobs.append(Job.fetch(job_id, connection=queue.connection))
            except NoSuchJobError:
                pass

    else:
        page_range = []

    context_data = admin.site.each_context(request)
    context_data.update({
        'title': _("Finished Jobs"),
        'queue': queue,
        'queue_index': queue_index,
        'jobs': jobs,
        'num_jobs': num_jobs,
        'page': page,
        'page_range': page_range,
        'job_status': _('Finished'),
    })
    return render(request, 'django_rq/jobs.html', context_data)
Example #13
def jobs():
    with Connection(redis_client):
        q = Queue()
        # possibly also use DeferredJobRegistry()
        registries = [
            ('Waiting', q),
            ('Running', StartedJobRegistry()),
            ('Finished', FinishedJobRegistry()),
            ('Failed', Queue('failed')),
        ]
        jobs = OrderedDict(
            (name, [q.fetch_job(job_id) for job_id in registry.get_job_ids()])
            for name, registry in registries
        )
        return render_template('jobs.html', jobs=jobs)
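Queue('failed') above is the legacy failed-queue pattern; on current rq releases failures land in a FailedJobRegistry instead. A sketch of the equivalent lookup under the same Connection context:

from rq.registry import FailedJobRegistry

failed = FailedJobRegistry(queue=q)
failed_jobs = [q.fetch_job(job_id) for job_id in failed.get_job_ids()]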
Example #14
    def test_clean_registries(self):
        """clean_registries() cleans Started and Finished job registries."""

        queue = Queue(connection=self.testconn)

        finished_job_registry = FinishedJobRegistry(connection=self.testconn)
        self.testconn.zadd(finished_job_registry.key, {'foo': 1})

        started_job_registry = StartedJobRegistry(connection=self.testconn)
        self.testconn.zadd(started_job_registry.key, {'foo': 1})

        clean_registries(queue)
        self.assertEqual(self.testconn.zcard(finished_job_registry.key), 0)
        self.assertEqual(self.testconn.zcard(started_job_registry.key), 0)
Example #15
def status_by_map(map_id, version, file_type):
    """ Retrieve the status of a render job by its `map_id`, `version` and
    `file_type`


    :param map_id: map id
    :param version: version
    :param file_type: filetype
    :status 200: informations about current job
    :status 400: invalid file type
    :status 404: map and or version of map not found
    """
    queue = current_app.task_queue

    # check if it is queued to be rendered
    for job in queue.get_jobs():
        if job.meta['map_id'] == map_id and\
           job.meta['version'] == version and\
           job.meta['file_type'] == file_type:
            return status_by_job(job)

    # check if it is currently rendering
    started = StartedJobRegistry(queue=queue)
    job = _find_in_registry(started, map_id, version, file_type)
    if job:
        return status_by_job(job)

    # if it's not queued or rendering, it must already be rendered
    try:
        file_info = get_file_info(map_id, version, file_type)
        if not file_exists(file_info):
            abort(404)
    except UnsupportedFileType:
        abort(400)

    # enhance output with job_id if it was recently rendered
    finished = FinishedJobRegistry(queue=queue)
    job = _find_in_registry(finished, map_id, version, file_type)
    if job:
        return status_by_job(job)

    data = {
        'map_id': map_id,
        'file_type': file_type,
        'version': version,
        'status': 'finished',
        'url': url_for('static', filename=file_info['path'], _external=True)
    }
    return jsonify(**data)
Example #16
def finished_jobs(request, queue_index):
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)

    registry = FinishedJobRegistry(queue.name, queue.connection)

    items_per_page = 100
    num_jobs = len(registry)
    page = int(request.GET.get('page', 1))
    jobs = []

    if num_jobs > 0:
        last_page = int(ceil(num_jobs / items_per_page))
        page_range = range(1, last_page + 1)
        offset = items_per_page * (page - 1)
        job_ids = registry.get_job_ids(offset, offset + items_per_page - 1)

        for job_id in job_ids:
            try:
                jobs.append(Job.fetch(job_id, connection=queue.connection))
            except NoSuchJobError:
                pass

    else:
        page_range = []

    context_data = {
        'queue': queue,
        'queue_index': queue_index,
        'jobs': jobs,
        'num_jobs': num_jobs,
        'page': page,
        'page_range': page_range,
        'job_status': 'Finished',
    }
    return render(request, 'django_rq/jobs.html', context_data)
Example #17
def get_finished_tasks(request):

    current_queue = request.GET.get('queue')
    queue = django_rq.get_queue(current_queue)
    registry = FinishedJobRegistry(queue.name, queue.connection)

    items_per_page = 10
    num_jobs = len(registry)
    jobs = []

    if num_jobs > 0:
        offset = 0
        job_ids = registry.get_job_ids(offset, offset + items_per_page - 1)

        for job_id in job_ids:
            try:
                jobs.append(Job.fetch(job_id, connection=queue.connection))
            except NoSuchJobError:
                pass

    jobdata = list()
    for job in jobs:

        job_dict = {
            'job_id': job.id,
            'func_name': job.func_name,
            'ended_at': job.ended_at.strftime("%a, %d %b %Y %H:%M:%S +0000"),
            'enqueued_at':
            job.enqueued_at.strftime("%a, %d %b %Y %H:%M:%S +0000"),
            'args': [str(arg) for arg in job.args]
        }

        jobdata.append(job_dict)

    data = json.dumps(jobdata)
    return HttpResponse(data, content_type='application/json')
Example #18
def monitor():
    '''
    DOESN'T WORK
    Meant to run until all jobs are finished. Monitors queues and adds completed graphs to Blazegraph.
    '''
    sregistry = StartedJobRegistry(connection=redis_conn)
    fregistry = FinishedJobRegistry(connection=redis_conn)
    print(high.get_job_ids())  # 'high': a Queue defined elsewhere in the project
    print(sregistry.get_job_ids())
    while sregistry.get_job_ids():
        print('in sregistry...')
        print(fregistry.get_job_ids())
        for job_id in fregistry.get_job_ids():
            job = Job.fetch(job_id, connection=redis_conn)
            # sanity check
            if type(job.result) is Graph:
                print('inserting', job_id, job)
                logging.info('inserting %s %s', job_id, job)
                insert(job.result)
        print('sleeping 5')
        time.sleep(5)

    print('all jobs complete')
    logging.info('monitor() exiting...all jobs complete')
Example #19
    def test_work_with_serializer(self):
        queue = Queue(connection=self.testconn, serializer=JSONSerializer)
        worker = Worker(queues=[queue],
                        connection=self.testconn,
                        serializer=JSONSerializer)
        p = Process(target=kill_worker, args=(os.getpid(), False, 5))

        p.start()
        queue.enqueue_at(datetime(2019, 1, 1, tzinfo=timezone.utc),
                         say_hello,
                         meta={'foo': 'bar'})
        worker.work(burst=False, with_scheduler=True)
        p.join(1)
        self.assertIsNotNone(worker.scheduler)
        registry = FinishedJobRegistry(queue=queue)
        self.assertEqual(len(registry), 1)
Example #20
 def get(self):
     logging.debug("Get query params")
     parser = reqparse.RequestParser()
     parser.add_argument('id', required=True, type=str)
     parser.add_argument('check', required=True, type=str)
     args = parser.parse_args()
     id = args['id']
     check = args['check']
     ts = int(time.time())
     logging.debug("Get query all jobs")
     try:
         registry = FinishedJobRegistry('default', connection=redis)
         job_ids = registry.get_job_ids()
         tsarr = []
         tmparr = []
         for i in job_ids:
             logging.debug("Get job id " + i)
             joba = q.fetch_job(i)
             try:
                 logging.debug("Check job for conditions %s id=%s check=%s ts=%s",
                               i, joba.meta['id'], joba.meta['check'],
                               joba.meta['ts'])
                 if joba.meta['id'] == id and joba.meta['check'] == check:
                     logging.debug("Found job for id=%s check=%s ts=%s",
                                   joba.meta['id'], joba.meta['check'],
                                   joba.meta['ts'])
                     tsarr.append(int(joba.meta['ts']))
                     tmparr.append({"job": i, "ts": str(joba.meta['ts'])})
             except:  # jobs without the expected meta keys are skipped
                 pass
         if tsarr:
             logging.debug("Find max time ")
             logging.debug("For array job  " + str(tmparr))
             max_time = max(tsarr)
             logging.debug("Max time " + str(max_time))
             for j in tmparr:
                 if int(j['ts']) == max_time:
                     job_max_id = j['job']
                     logging.debug("Max time  job " + str(job_max_id))
                     job_m = q.fetch_job(job_max_id)
             headers = {'Content-Type': 'application/json'}
             data = job_m.result
             return make_response(data, 200, headers)
     except Exception as e:
         print(str(e))
         return "Not found", 404
Example #21
def get_meta_info(app_info):
    with Connection(redis.from_url(app_info['REDIS_URL'])):
        q = Queue('default')
        registry = StartedJobRegistry('default')
        f_registry = FinishedJobRegistry('default')

        # Retrieve task ids
        all_task_ids = get_all_task_ids(app_info)

        task_data = {
            'queue': q,
            'task_ids': all_task_ids,
        }

        data = {}
        data['running_tasks'] = fetch_tasks_by_category(task_data, "running")
        data['queued_tasks'] = fetch_tasks_by_category(task_data, "queued")
        data['finished_tasks'] = fetch_tasks_by_category(task_data, "finished")

    with Connection(redis.from_url(app_info['REDIS_URL'])):
        q = Queue('aggregator')

        # Get all aggregated finished tasks
        task_data = {
            'queue': q,
            'task_ids': get_all_finished_tasks_from('aggregator', app_info),
        }
        data['agg_finished_tasks'] = fetch_tasks_by_category(
            task_data, "finished")

        # Get all aggregated queued tasks
        task_data = {
            'queue': q,
            'task_ids': get_all_queued_tasks_from('aggregator', app_info),
        }
        data['agg_queued_tasks'] = fetch_tasks_by_category(task_data, "queued")

        # Get all aggregated running tasks
        task_data = {
            'queue': q,
            'task_ids': get_all_running_tasks_from('aggregator', app_info),
        }
        data['agg_running_tasks'] = fetch_tasks_by_category(
            task_data, "running")

        return data
Example #22
class TestFinishedJobRegistry(RQTestCase):

    def setUp(self):
        super(TestFinishedJobRegistry, self).setUp()
        self.registry = FinishedJobRegistry(connection=self.testconn)

    def test_key(self):
        self.assertEqual(self.registry.key, 'rq:finished:default')

    def test_cleanup(self):
        """Finished job registry removes expired jobs."""
        timestamp = current_timestamp()
        self.testconn.zadd(self.registry.key, {'foo': 1})
        self.testconn.zadd(self.registry.key, {'bar': timestamp + 10})
        self.testconn.zadd(self.registry.key, {'baz': timestamp + 30})

        self.registry.cleanup()
        self.assertEqual(self.registry.get_job_ids(), ['bar', 'baz'])

        self.registry.cleanup(timestamp + 20)
        self.assertEqual(self.registry.get_job_ids(), ['baz'])

    def test_jobs_are_put_in_registry(self):
        """Completed jobs are added to FinishedJobRegistry."""
        self.assertEqual(self.registry.get_job_ids(), [])
        queue = Queue(connection=self.testconn)
        worker = Worker([queue])

        # Completed jobs are put in FinishedJobRegistry
        job = queue.enqueue(say_hello)
        worker.perform_job(job, queue)
        self.assertEqual(self.registry.get_job_ids(), [job.id])

        # Failed jobs are not put in FinishedJobRegistry
        failed_job = queue.enqueue(div_by_zero)
        worker.perform_job(failed_job, queue)
        self.assertEqual(self.registry.get_job_ids(), [job.id])
Example #23
    def test_pass_queue_via_commandline_args(self):
        """
        Checks that passing queues via commandline arguments works
        """
        queue_names = ['django_rq_test', 'django_rq_test2']
        jobs = []
        for queue_name in queue_names:
            queue = get_queue(queue_name)
            jobs.append({
                'job': queue.enqueue(divide, 42, 1),
                'finished_job_registry': FinishedJobRegistry(queue.name, queue.connection),
            })

        call_command('rqworker', *queue_names, burst=True)

        for job in jobs:
            self.assertTrue(job['job'].is_finished)
            self.assertIn(job['job'].id, job['finished_job_registry'].get_job_ids())
Example #24
def check_upload_queue():
    # Failed jobs
    for id in failed_jobs_by_queue(UploadQueue):
        update_status(id, "Upload", "Upload", 0, 0)
        if os.path.exists(run_folder + str(id)):
            shutil.rmtree(run_folder + str(id))
        try:
            Job.fetch(str(id), connection=redisClient).delete()
        except:
            return
    # Finished jobs
    for id in FinishedJobRegistry('UploadQueue',
                                  connection=redisClient).get_job_ids():
        if os.path.exists(run_folder + str(id)):
            shutil.rmtree(run_folder + str(id))
        try:
            Job.fetch(str(id), connection=redisClient).delete()
        except:
            return
Example #25
    def task():
        startedjr = StartedJobRegistry("default", connection=redis_conn)
        finishedjr = FinishedJobRegistry("default", connection=redis_conn)
        failedjr = FailedJobRegistry("default", connection=redis_conn)
        deferredjr = DeferredJobRegistry("default", connection=redis_conn)

        def job_registry_to_json(jr):
            return {
                "job_ids": jr.get_job_ids(),
                "expired_job_ids": jr.get_expired_job_ids(),
            }

        return json.dumps({
            "queued": q.job_ids,
            "started": job_registry_to_json(startedjr),
            "finished": job_registry_to_json(finishedjr),
            "failed": job_registry_to_json(failedjr),
            "deferred": job_registry_to_json(deferredjr)
        })
Example #26
def get_queue_registry_jobs_count(queue_name, registry_name, offset, per_page):
    queue = Queue(queue_name)
    if registry_name == 'failed':
        current_queue = FailedJobRegistry(queue_name)
    elif registry_name == 'deferred':
        current_queue = DeferredJobRegistry(queue_name)
    elif registry_name == 'started':
        current_queue = StartedJobRegistry(queue_name)
    elif registry_name == 'finished':
        current_queue = FinishedJobRegistry(queue_name)
    else:
        current_queue = queue
    total_items = current_queue.count

    job_ids = current_queue.get_job_ids(offset, per_page)
    current_queue_jobs = [queue.fetch_job(job_id) for job_id in job_ids]
    jobs = [serialize_job(job) for job in current_queue_jobs]

    return (total_items, jobs)
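A hypothetical call, with one caveat worth flagging: as we read rq's API, a registry's get_job_ids(start, end) treats the second argument as an inclusive index while Queue.get_job_ids(offset, length) treats it as a count, so passing per_page to both as above yields one extra row per page for registries.

# First page of the finished registry on the 'default' queue (names assumed).
total_items, jobs = get_queue_registry_jobs_count('default', 'finished', 0, 10)
print(total_items, len(jobs))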
Example #27
def stats(request):
    queues = []
    for index, config in enumerate(QUEUES_LIST):

        queue = get_queue_by_index(index)
        connection = queue.connection

        queue_data = {
            'name': queue.name,
            'jobs': queue.count,
            'index': index,
            'connection_kwargs': connection.connection_pool.connection_kwargs
        }

        if queue.name == 'failed':
            queue_data['workers'] = '-'
            queue_data['finished_jobs'] = '-'
            queue_data['started_jobs'] = '-'
            queue_data['deferred_jobs'] = '-'

        else:
            connection = get_connection(queue.name)
            all_workers = Worker.all(connection=connection)
            queue_workers = [worker for worker in all_workers if queue in worker.queues]
            queue_data['workers'] = len(queue_workers)

            finished_job_registry = FinishedJobRegistry(queue.name, connection)
            started_job_registry = StartedJobRegistry(queue.name, connection)
            deferred_job_registry = DeferredJobRegistry(queue.name, connection)
            queue_data['finished_jobs'] = len(finished_job_registry)
            queue_data['started_jobs'] = len(started_job_registry)
            queue_data['deferred_jobs'] = len(deferred_job_registry)

        queues.append(queue_data)

    context_data = admin.site.each_context(request)
    context_data.update({
        'title': _('Django RQ'),
        'queues': queues
    })
    return render(request, 'django_rq/stats.html', context_data)
Example #28
def stats(request):
    queues = []
    workers_collections = collect_workers_by_connection(QUEUES_LIST)
    for index, config in enumerate(QUEUES_LIST):

        queue = get_queue_by_index(index)
        connection = queue.connection

        queue_data = {
            'name': queue.name,
            'jobs': queue.count,
            'index': index,
            'connection_kwargs': connection.connection_pool.connection_kwargs
        }

        if queue.name == 'failed':
            queue_data['workers'] = '-'
            queue_data['finished_jobs'] = '-'
            queue_data['started_jobs'] = '-'
            queue_data['deferred_jobs'] = '-'

        else:
            connection = get_connection(queue.name)
            all_workers = get_all_workers_by_configuration(
                config['connection_config'], workers_collections)
            queue_workers = [
                worker for worker in all_workers if queue in worker.queues
            ]
            queue_data['workers'] = len(queue_workers)

            finished_job_registry = FinishedJobRegistry(queue.name, connection)
            started_job_registry = StartedJobRegistry(queue.name, connection)
            deferred_job_registry = DeferredJobRegistry(queue.name, connection)
            queue_data['finished_jobs'] = len(finished_job_registry)
            queue_data['started_jobs'] = len(started_job_registry)
            queue_data['deferred_jobs'] = len(deferred_job_registry)

        queues.append(queue_data)

    context_data = {'queues': queues}
    return render(request, 'django_rq/stats.html', context_data)
Example #29
def empty_queue(queue_name, registry_name):
    if registry_name == "queued":
        q = Queue(queue_name)
        q.empty()
    elif registry_name == "failed":
        ids = FailedJobRegistry(queue_name).get_job_ids()
        for id in ids:
            delete_job_view(id)
    elif registry_name == "deferred":
        ids = DeferredJobRegistry(queue_name).get_job_ids()
        for id in ids:
            delete_job_view(id)
    elif registry_name == "started":
        ids = StartedJobRegistry(queue_name).get_job_ids()
        for id in ids:
            delete_job_view(id)
    elif registry_name == "finished":
        ids = FinishedJobRegistry(queue_name).get_job_ids()
        for id in ids:
            delete_job_view(id)
    return dict(status="OK")
Example #30
    def test_clean_registries_with_serializer(self):
        """clean_registries() cleans Started and Finished job registries (with serializer)."""

        queue = Queue(connection=self.testconn, serializer=JSONSerializer)

        finished_job_registry = FinishedJobRegistry(connection=self.testconn,
                                                    serializer=JSONSerializer)
        self.testconn.zadd(finished_job_registry.key, {'foo': 1})

        started_job_registry = StartedJobRegistry(connection=self.testconn,
                                                  serializer=JSONSerializer)
        self.testconn.zadd(started_job_registry.key, {'foo': 1})

        failed_job_registry = FailedJobRegistry(connection=self.testconn,
                                                serializer=JSONSerializer)
        self.testconn.zadd(failed_job_registry.key, {'foo': 1})

        clean_registries(queue)
        self.assertEqual(self.testconn.zcard(finished_job_registry.key), 0)
        self.assertEqual(self.testconn.zcard(started_job_registry.key), 0)
        self.assertEqual(self.testconn.zcard(failed_job_registry.key), 0)
Example #31
 @classmethod
 def is_training(cls, project):
     if not cls.has_active_model(project):
         return {'is_training': False}
     m = cls.get(project)
     if cls.without_redis():
         return {
             'is_training': m.is_training,
             'backend': 'none',
             'model_version': m.model_version
         }
     else:
         started_jobs = StartedJobRegistry(
             cls._redis_queue.name,
             cls._redis_queue.connection).get_job_ids()
         finished_jobs = FinishedJobRegistry(
             cls._redis_queue.name,
             cls._redis_queue.connection).get_job_ids()
         failed_jobs = FailedJobRegistry(
             cls._redis_queue.name,
             cls._redis_queue.connection).get_job_ids()
         running_jobs = list(
             set(started_jobs) - set(finished_jobs + failed_jobs))
         logger.debug('Running jobs: ' + str(running_jobs))
         for job_id in running_jobs:
             job = Job.fetch(job_id, connection=cls._redis)
             if job.meta.get('project') == project:
                 return {
                     'is_training': True,
                     'job_id': job_id,
                     'backend': 'redis',
                     'model_version': m.model_version,
                 }
         return {
             'is_training': False,
             'backend': 'redis',
             'model_version': m.model_version
         }
Example #32
 @classmethod
 def is_training(cls, project):
     if not cls.has_active_model(project):
         return {"is_training": False}
     m = cls.get(project)
     if cls.without_redis():
         return {
             "is_training": m.is_training,
             "backend": "none",
             "model_version": m.model_version,
         }
     else:
         started_jobs = StartedJobRegistry(
             cls._redis_queue.name, cls._redis_queue.connection
         ).get_job_ids()
         finished_jobs = FinishedJobRegistry(
             cls._redis_queue.name, cls._redis_queue.connection
         ).get_job_ids()
         failed_jobs = FailedJobRegistry(
             cls._redis_queue.name, cls._redis_queue.connection
         ).get_job_ids()
         running_jobs = list(set(started_jobs) - set(finished_jobs + failed_jobs))
         logger.debug("Running jobs: " + str(running_jobs))
         for job_id in running_jobs:
             job = Job.fetch(job_id, connection=cls._redis)
             if job.meta.get("project") == project:
                 return {
                     "is_training": True,
                     "job_id": job_id,
                     "backend": "redis",
                     "model_version": m.model_version,
                 }
         return {
             "is_training": False,
             "backend": "redis",
             "model_version": m.model_version,
         }
Example #33
    def test_timer_death_penalty(self):
        """Ensure TimerDeathPenalty works correctly."""
        q = Queue(connection=self.testconn)
        q.empty()
        finished_job_registry = FinishedJobRegistry(connection=self.testconn)
        failed_job_registry = FailedJobRegistry(connection=self.testconn)

        # make sure death_penalty_class persists
        w = TimerBasedWorker([q], connection=self.testconn)
        self.assertIsNotNone(w)
        self.assertEqual(w.death_penalty_class, TimerDeathPenalty)

        # Test short-running job doesn't raise JobTimeoutException
        job = q.enqueue(thread_friendly_sleep_func, args=(1,), job_timeout=3)
        w.work(burst=True)
        job.refresh()
        self.assertIn(job, finished_job_registry)

        # Test long-running job raises JobTimeoutException
        job = q.enqueue(thread_friendly_sleep_func, args=(5,), job_timeout=3)
        w.work(burst=True)
        self.assertIn(job, failed_job_registry)
        job.refresh()
        self.assertIn("rq.timeouts.JobTimeoutException", job.exc_info)
Example #34
 @property
 def finished_job_registry(self):
     """Returns this queue's FinishedJobRegistry."""
     from rq.registry import FinishedJobRegistry
     return FinishedJobRegistry(queue=self)
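With the property in place, the registry is reachable straight from a queue; a minimal usage sketch assuming a local Redis:

from redis import Redis
from rq import Queue

queue = Queue('example', connection=Redis())
registry = queue.finished_job_registry
print(registry.key)   # 'rq:finished:example'
print(len(registry))  # finished jobs still within their result TTL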
Example #35
 def setUp(self):
     super(TestFinishedJobRegistry, self).setUp()
     self.registry = FinishedJobRegistry(connection=self.testconn)