Example #1
def test_failed_job_max_tries_1__move_to_dlq():
    q = Queue()
    failed_q = get_failed_queue()
    dlq = Queue('dead_letter_queue')

    # We could test with one worker here, but we don't want the test to
    # depend on when the Worker performs maintenance tasks (before or
    # after processing jobs).
    w = Worker([q])
    rw = RetryWorker([q], retry_config=dict(max_tries=1, delays=[]))

    # run job that will fail
    job = q.enqueue(error_fun)
    w.work(burst=True)
    assert q.count == 0
    assert get_failed_queue().count == 1

    # run retry worker
    rw.work(burst=True)
    job.refresh()

    assert q.count == 0
    assert failed_q.count == 0
    assert dlq.count == 1
    assert job.meta['tries'] == 1
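
The error_fun and success_fun jobs these RetryWorker tests enqueue are not shown on this page; a minimal sketch of what they presumably look like (only the names come from the tests, the bodies are assumed):

    def error_fun():
        # Hypothetical fixture: always raises, so the job lands on the failed queue.
        raise RuntimeError('induced failure')

    def success_fun():
        # Hypothetical fixture: always succeeds, so the failed queue stays empty.
        return True
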
Example #2
def overview(queue_name, page):
    if queue_name == 'failed':
        queue = get_failed_queue()
    elif queue_name is None:
        # Show the failed queue by default if it contains any jobs
        failed = get_failed_queue()
        if not failed.is_empty():
            queue = failed
        else:
            queue = Queue()
    else:
        queue = Queue(queue_name)
    r = make_response(
        render_template(
            'rq_dashboard/dashboard.html',
            workers=Worker.all(),
            queue=queue,
            page=page,
            queues=get_all_queues(),
            rq_url_prefix=url_for('.overview'),
            rq_dashboard_version=rq_dashboard_version,
            rq_version=rq_version,
        ))
    r.headers.set('Cache-Control', 'no-store')
    return r
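
In rq-dashboard this view is mounted on a blueprint, with route defaults supplying queue_name and page; the wiring is roughly the following (the blueprint name and exact route rules are assumed, not shown on this page):

    @blueprint.route('/', defaults={'queue_name': None, 'page': '1'})
    @blueprint.route('/<queue_name>', defaults={'page': '1'})
    @blueprint.route('/<queue_name>/<page>')
    def overview(queue_name, page):
        ...
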
Example #3
def overview(queue_name, page, state=None):
    if queue_name == 'failed':
        queue = get_failed_queue()
    elif queue_name is None:
        # Show the failed queue by default if it contains any jobs
        failed = get_failed_queue()
        if not failed.is_empty():
            queue = failed
        else:
            queue = Queue()
    else:
        queue = Queue(queue_name)

    r = make_response(
        render_template('rq_dashboard/dashboard.html',
                        workers=Worker.all(),
                        queue=queue,
                        page=page,
                        state=state,
                        queues=get_all_queues(),
                        rq_url_prefix=url_for('.overview'),
                        newest_top=current_app.config.get(
                            'RQ_DASHBOARD_JOB_SORT_ORDER') == '-age'))
    r.headers.set('Cache-Control', 'no-store')
    return r
Example #4
File: test_queue.py Project: bradleyy/rq
    def test_requeue_nonfailed_job_fails(self):
        """Requeueing non-failed jobs raises error."""
        q = Queue()
        job = q.enqueue(say_hello, 'Nick', foo='bar')

        # Assert that we cannot requeue a job that's not on the failed queue
        with self.assertRaises(InvalidJobOperationError):
            get_failed_queue().requeue(job.id)
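
say_hello and div_by_zero are fixtures from rq's own test suite; they look roughly like the following (paraphrased from memory, so treat as an approximation):

    def say_hello(name=None):
        # A job taking an argument and returning a value.
        if name is None:
            name = 'Stranger'
        return 'Hi there, %s!' % name

    def div_by_zero(x):
        # A job that always fails with ZeroDivisionError.
        return x / 0
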
Example #5
    def test_requeue_sets_status_to_queued(self):
        """Requeueing a job should set its status back to QUEUED."""
        job = Job.create(func=div_by_zero, args=(1, 2, 3))
        job.save()
        get_failed_queue().quarantine(job, Exception('Some fake error'))
        get_failed_queue().requeue(job.id)

        job = Job.fetch(job.id)
        self.assertEqual(job.status, Status.QUEUED)
Example #6
File: test_queue.py Project: bradleyy/rq
    def test_quarantine_preserves_timeout(self):
        """Quarantine preserves job timeout."""
        job = Job.create(func=div_by_zero, args=(1, 2, 3))
        job.origin = 'fake'
        job.timeout = 200
        job.save()
        get_failed_queue().quarantine(job, Exception('Some fake error'))

        self.assertEquals(job.timeout, 200)
Example #7
File: test_queue.py Project: bradleyy/rq
    def test_requeue_sets_status_to_queued(self):
        """Requeueing a job should set its status back to QUEUED."""
        job = Job.create(func=div_by_zero, args=(1, 2, 3))
        job.save()
        get_failed_queue().quarantine(job, Exception('Some fake error'))
        get_failed_queue().requeue(job.id)

        job = Job.fetch(job.id)
        self.assertEqual(job.get_status(), JobStatus.QUEUED)
Example #8
    def setUp(self):
        super(TestRQCli, self).setUp()
        db_num = self.testconn.connection_pool.connection_kwargs['db']
        self.redis_url = 'redis://127.0.0.1:6379/%d' % db_num

        job = Job.create(func=div_by_zero, args=(1, 2, 3))
        job.origin = 'fake'
        job.save()
        get_failed_queue().quarantine(job, Exception('Some fake error'))  # noqa
Example #9
File: test_queue.py Project: yanghq23/rq
    def test_get_failed_queue(self):
        """Use custom job class"""
        class CustomJob(Job):
            pass

        failed_queue = get_failed_queue(job_class=CustomJob)
        self.assertIs(failed_queue.job_class, CustomJob)

        failed_queue = get_failed_queue(job_class='rq.job.Job')
        self.assertIsNot(failed_queue.job_class, CustomJob)
Example #10
    def test_requeueing_preserves_timeout(self):
        """Requeueing preserves job timeout."""
        job = Job.create(func=div_by_zero, args=(1, 2, 3))
        job.origin = "fake"
        job.timeout = 200
        job.save()
        get_failed_queue().quarantine(job, Exception("Some fake error"))
        get_failed_queue().requeue(job.id)

        job = Job.fetch(job.id)
        self.assertEquals(job.timeout, 200)
Example #11
def test_failed_job_max_tries_2__retry_once_then_move_to_dlq():
    q = Queue()
    q2 = Queue('not_used')
    failed_q = get_failed_queue()
    dlq = Queue('dead_letter_queue')

    w = Worker([q])

    # Here the RetryWorker is not listening on an active queue: it will not
    # run any jobs, only requeue failed ones.
    rw = RetryWorker([q2],
                     retry_config=dict(max_tries=2,
                                       maint_interval=0,
                                       delays=[]))

    # run job that will fail
    job = q.enqueue(error_fun)
    w.work(burst=True)
    assert q.count == 0
    assert get_failed_queue().count == 1

    # run retry worker
    rw.work(burst=True)

    # job should be requeued
    assert q.count == 1
    assert failed_q.count == 0
    assert dlq.count == 0
    job.refresh()
    assert job.meta['tries'] == 2

    # regular worker runs the job again
    w.work(burst=True)

    # job fails again
    assert q.count == 0
    assert failed_q.count == 1
    assert dlq.count == 0
    job.refresh()
    assert job.meta['tries'] == 2

    # run retry worker
    rw.work(burst=True)

    # job should be in dlq
    assert q.count == 0
    assert failed_q.count == 0
    assert dlq.count == 1
    job.refresh()
    assert job.meta['tries'] == 2
Example #12
 def requeue_all(self):
     fq = get_failed_queue()
     job_ids = fq.job_ids
     count = len(job_ids)
     for job_id in job_ids:
         requeue_job(job_id)
     redirect(URL())
Example #13
def reschedule_all_failed(request):
    queue = get_failed_queue(django_rq.get_connection())

    for job in queue.jobs:
        requeue_job(job.id, connection=queue.connection)

    return HttpResponse('Success')
Example #14
def test_does_regular_work_like_any_good_worker():
    q = Queue()
    w = RetryWorker([q])
    q.enqueue(success_fun)
    w.work(burst=True)
    assert q.count == 0
    assert get_failed_queue().count == 0
Example #15
def list_regexp(options):
    r = redis.StrictRedis.from_url(REDIS_URL_RQ)

    if options.source:
        fq = Queue(options.source, connection=r)
    else:
        fq = get_failed_queue(connection=r)

    def exception_matches(regexp, job):
        exc_info = '' if job.exc_info is None else job.exc_info
        reason = exc_info.split('\n')[:-1]
        for r in reason:
            match = re.search(regexp, r)
            if match:
                return True
        return False

    jobs = fq.get_jobs()

    for regexp in options.regexp:
        jobs = [job for job in jobs if exception_matches(regexp, job)]

    for regexp in options.descr_regexp:
        jobs = [job for job in jobs if re.search(regexp, job.description)]

    for job in jobs:
        print(job.id)
Example #16
def list_timeouts(options):
    r = redis.StrictRedis.from_url(REDIS_URL_RQ)
    fq = get_failed_queue(connection=r)

    def get_timeout(job):
        exc_info = '' if job.exc_info is None else job.exc_info
        reason = exc_info.split('\n')[-2:-1]
        for r in reason:
            match = re.match(r'JobTimeoutException.*?(\d+)', r)
            if match:
                return int(match.group(1))
        return None

    jobs = fq.get_jobs()
    timeouts = map(get_timeout, jobs)

    timeouted_jobs = [(job, timeout) for (job, timeout) in zip(jobs, timeouts)
                      if timeout is not None]

    if options.list_nones:
        none_jobs = [job for job in fq.get_jobs() if job.exc_info is None]
        for job in none_jobs:
            print(job.id)
    else:
        for job, to in timeouted_jobs:
            print "%s\t\ttimeouted at: %d" % (job.id, to)
Example #17
def main():
    args = parse_args()

    if args.path:
        sys.path = args.path.split(':') + sys.path

    settings = {}
    if args.config:
        settings = read_config_file(args.config)

    setup_default_arguments(args, settings)

    setup_redis(args)

    try:
        if args.empty_failed_queue:
            num_jobs = get_failed_queue().empty()
            print('{} jobs removed from failed queue'.format(num_jobs))
        else:
            if args.only_queues:
                func = show_queues
            elif args.only_workers:
                func = show_workers
            else:
                func = show_both

            interval(args.interval, func, args)
    except ConnectionError as e:
        print(e)
        sys.exit(1)
    except KeyboardInterrupt:
        print()
        sys.exit(0)
Example #18
def list_messages():
    if request.method == 'POST':
        if request.form['action'] == 'resend':
            comment_id = request.form['comment_id']
            comment_submitted(sender=None,
                              comment=Comment.query.get(comment_id))
        elif request.form['action'] == 'rerun':
            job_id = request.form['job_id']
            redis_url = current_app.config['REDIS_URL']
            with Connection(redis.from_url(redis_url)):
                fq = get_failed_queue()
                fq.requeue(job_id)
    else:
        page = request.args.get('page', 1, type=int)
        pagination = Message.query.paginate(
            page,
            per_page=get_setting('items_per_page').value,
            error_out=False)
        messages = pagination.items
        with Connection(redis.from_url(current_app.config['REDIS_URL'])):
            queue = Queue()
        return current_plugin.render_template('list.html',
                                              messages=messages,
                                              queue=queue,
                                              pagination={
                                                  'pagination': pagination,
                                                  'fragment': {},
                                                  'url_for': plugin_url_for,
                                                  'url_for_params': {
                                                      'args': ['list'],
                                                      'kwargs': {
                                                          '_component': 'admin'
                                                      }
                                                  }
                                              })
Example #19
    def delete(self, id_):
        '''
        Delete a failed task with the given id_.

        :<header Content-Type: application/json
        :<header X-Auth: the client's auth token
        :query id: the job ID to delete

        :>header Content-Type: application/json

        :status 200: ok
        :status 401: authentication required
        :status 403: you must be an administrator
        :status 404: no job exists with this ID
        '''

        with rq.Connection(g.redis):
            found = False

            for job in rq.get_failed_queue().jobs:
                if job.id == id_:
                    job.delete()
                    found = True
                    break

            if not found:
                raise NotFound('No job exists with ID "%s".' % id_)

        return jsonify(message='ok')
Example #20
File: web.py Project: ducu/rq-dashboard
def requeue_all():
    fq = get_failed_queue()
    job_ids = fq.job_ids
    count = len(job_ids)
    for job_id in job_ids:
        requeue_job(job_id, connection=current_app.redis_conn)
    return dict(status='OK', count=count)
Example #21
File: test_worker.py Project: selwin/rq
 def test_run_access_self(self):
     """Schedule a job, then run the worker as subprocess"""
     q = Queue()
     q.enqueue(access_self)
     subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])
     assert get_failed_queue().count == 0
     assert q.count == 0
Example #22
    def set_up(cls, config):
        # Connection settings shared by every queue, the scheduler,
        # and the failed queue; each still gets its own Redis instance.
        redis_kwargs = dict(
            host=config["host"],
            port=int(config["port"]),
            password=config["password"],
        )

        for queue_name in ("default", "email", "kpi"):
            queue = Queue(connection=Redis(**redis_kwargs),
                          **config["queues"][queue_name])
            setattr(cls, queue_name, queue)

        cls.scheduler = Scheduler(connection=Redis(**redis_kwargs))

        cls.failed_queue = get_failed_queue(connection=Redis(**redis_kwargs))

        push_connection(Redis(**redis_kwargs))
Example #23
File: test_worker.py Project: selwin/rq
    def test_self_modification_persistence_with_error(self):
        """Make sure that any meta modification done by
        the job itself persists completely through the
        queue/worker/job stack -- even if the job errored"""
        q = Queue()
        failed_q = get_failed_queue()
        # Also make sure that previously existing metadata
        # persists properly
        job = q.enqueue(modify_self_and_error, meta={'foo': 'bar', 'baz': 42},
                        args=[{'baz': 10, 'newinfo': 'waka'}])

        w = Worker([q])
        w.work(burst=True)

        # Postconditions
        self.assertEqual(q.count, 0)
        self.assertEqual(failed_q.count, 1)
        self.assertEqual(w.get_current_job_id(), None)

        job_check = Job.fetch(job.id)
        self.assertEqual(set(job_check.meta.keys()),
                         set(['foo', 'baz', 'newinfo']))
        self.assertEqual(job_check.meta['foo'], 'bar')
        self.assertEqual(job_check.meta['baz'], 10)
        self.assertEqual(job_check.meta['newinfo'], 'waka')
Example #24
File: test_worker.py Project: askyer/rq
    def test_work_fails(self):
        """Failing jobs are put on the failed queue."""
        q = Queue()
        failed_q = get_failed_queue()

        # Preconditions
        self.assertEquals(failed_q.count, 0)
        self.assertEquals(q.count, 0)

        # Action
        job = q.enqueue(div_by_zero)
        self.assertEquals(q.count, 1)

        # keep for later
        enqueued_at_date = strip_microseconds(job.enqueued_at)

        w = Worker([q])
        w.work(burst=True)  # should silently pass

        # Postconditions
        self.assertEquals(q.count, 0)
        self.assertEquals(failed_q.count, 1)

        # Check the job
        job = Job.fetch(job.id)
        self.assertEquals(job.origin, q.name)

        # Should be the original enqueued_at date, not the date of enqueueing
        # to the failed queue
        self.assertEquals(job.enqueued_at, enqueued_at_date)
        self.assertIsNotNone(job.exc_info)  # should contain exc_info
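
strip_microseconds is a small helper from rq's test suite; presumably something like this (assumed, not shown on this page):

    def strip_microseconds(date):
        # Drop sub-second precision so a datetime survives the Redis
        # round-trip and can be compared with assertEquals.
        return date.replace(microsecond=0)
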
Example #25
 def test_run_scheduled_access_self(self):
     """Schedule a job that schedules a job, then run the worker as subprocess"""
     q = Queue()
     q.enqueue(schedule_access_self)
     subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])
     assert get_failed_queue().count == 0
     assert q.count == 0
Example #26
def requeue_all():
    fq = get_failed_queue()
    job_ids = fq.job_ids
    count = len(job_ids)
    for job_id in job_ids:
        requeue_job(job_id)
    return dict(status='OK', count=count)
Example #27
File: web.py Project: joowani/rq-dash
def requeue_2xtimeout_job_view(job_id):
    # Get the handle for the failed queue
    fq = get_failed_queue()
    # Fetch the job from the failed queue
    job = fq.fetch_job(job_id)
    # Test if the job exists
    if job is None:
        raise NoSuchJobError(
            'Job {} does not exist in failed queue'.format(job_id)
        )
    # Remove the job from the failed queue
    if fq.remove(job_id) == 0:
        raise InvalidJobOperationError('Cannot requeue non-failed jobs')
    # Reset the job state
    job.set_status(JobStatus.QUEUED)
    job.exc_info = None
    if not job.timeout:
        job.timeout = Queue.DEFAULT_TIMEOUT
    # Double the timeout
    job.timeout *= 2
    # Get a handle for the original queue
    q = Queue(job.origin, connection=job.connection)
    # Queue the job
    q.enqueue_job(job)
    return dict(status='OK')
Example #28
File: test_worker.py Project: askyer/rq
    def test_custom_exc_handling(self):
        """Custom exception handling."""
        def black_hole(job, *exc_info):
            # Don't fall through to default behaviour (moving to failed queue)
            return False

        q = Queue()
        failed_q = get_failed_queue()

        # Preconditions
        self.assertEquals(failed_q.count, 0)
        self.assertEquals(q.count, 0)

        # Action
        job = q.enqueue(div_by_zero)
        self.assertEquals(q.count, 1)

        w = Worker([q], exc_handler=black_hole)
        w.work(burst=True)  # should silently pass

        # Postconditions
        self.assertEquals(q.count, 0)
        self.assertEquals(failed_q.count, 0)

        # Check the job
        job = Job.fetch(job.id)
        self.assertEquals(job.is_failed, True)
Example #29
def get_queue(queue_name):
    if queue_name == 'failed':
        return get_failed_queue()
    elif queue_name == 'scheduled_jobs':
        return SchedulerQueue()
    else:
        return Queue(queue_name)
Example #30
File: test_worker.py Project: askyer/rq
    def test_work_is_unreadable(self):
        """Unreadable jobs are put on the failed queue."""
        q = Queue()
        failed_q = get_failed_queue()

        self.assertEquals(failed_q.count, 0)
        self.assertEquals(q.count, 0)

        # NOTE: We have to fake this enqueueing for this test case.
        # What we're simulating here is a call to a function that is not
        # importable from the worker process.
        job = Job.create(func=div_by_zero, args=(3,))
        job.save()
        data = self.testconn.hget(job.key, 'data')
        invalid_data = data.replace(b'div_by_zero', b'nonexisting')
        assert data != invalid_data
        self.testconn.hset(job.key, 'data', invalid_data)

        # We use the low-level internal function to enqueue any data (bypassing
        # validity checks)
        q.push_job_id(job.id)

        self.assertEquals(q.count, 1)

        # All set, we're going to process it
        w = Worker([q])
        w.work(burst=True)   # should silently pass
        self.assertEquals(q.count, 0)
        self.assertEquals(failed_q.count, 1)
Example #31
def requeue(cli_config, all, job_class, job_ids, **options):
    """Requeue failed jobs."""

    failed_queue = get_failed_queue(connection=cli_config.connection,
                                    job_class=cli_config.job_class)

    if all:
        job_ids = failed_queue.job_ids

    if not job_ids:
        click.echo('Nothing to do')
        sys.exit(0)

    click.echo('Requeueing {0} jobs from failed queue'.format(len(job_ids)))
    fail_count = 0
    with click.progressbar(job_ids) as job_ids:
        for job_id in job_ids:
            try:
                failed_queue.requeue(job_id)
            except InvalidJobOperationError:
                fail_count += 1

    if fail_count > 0:
        click.secho(
            'Unable to requeue {0} jobs from failed queue'.format(fail_count),
            fg='red')
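
This is the body of rq's own "rq requeue" subcommand; from a shell the equivalent invocations would be roughly the following (inferred from the signature above, exact CLI wiring not shown here):

    $ rq requeue --all                  # requeue every job on the failed queue
    $ rq requeue <job_id> [<job_id>...] # requeue specific jobs by ID
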
Example #32
File: app.py Project: olaputin/ptl_tool
def get_operation_status():
    result = {}
    for op in OPERATIONS:
        q = Queue(op, connection=redis_connection())
        result[op] = [job_to_dict(j) for j in q.jobs]
    fq = get_failed_queue(redis_connection())
    result['failed'] = [job_to_dict(j) for j in fq.jobs]
    return make_response(body=result)
Example #33
    def __init__(self):
        self.redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
        self.conn = redis.from_url(self.redis_url)
        self.queue = Queue(connection=self.conn)
        self.listen = ['default']
        self.failed_queue = get_failed_queue(connection=self.conn)
        self.scheduler = BackgroundScheduler()
Example #34
def failed_jobs_by_queue(queue):
    failed = []
    fq = get_failed_queue(redisClient)
    for id in fq.get_job_ids():
        try:
            if Job.fetch(str(id), connection=redisClient).origin == queue.name:
                failed.append(str(id))
        except:
            continue
    return failed
Example #35
    def list_queues(self):
        with Connection(self.redis_conn):
            queues = []
            if len(get_failed_queue().jobs) > 0:
                queues.append({
                    'worker': "N/A",
                    'name': 'failed',
                    'jobs': len(get_failed_queue().jobs),
                    'link': URL("admin", "jobs", args=["failed"])})

            for w in Worker.all():
                for q in w.queues:
                    queues.append({
                        'worker': w.name,
                        'name': q.name,
                        'jobs': q.count,
                        'link': URL("admin", "jobs", args=[q.name])})

            return queues
Example #36
File: test_worker.py Project: selwin/rq
 def test_run_scheduled_access_self(self):
     """Schedule a job that schedules a job, then run the worker as subprocess"""
     if 'pypy' in sys.version.lower():
         # horrible bodge until we drop 2.6 support and can use skipIf
         return
     q = Queue()
     q.enqueue(schedule_access_self)
     subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])
     assert get_failed_queue().count == 0
     assert q.count == 0
Example #37
    def check_failed_queue(self, redis_conn):
        """
        Requeue all jobs in the Failed job queue
        """
        with Connection(redis_conn):
            failed_queue = get_failed_queue()

            # TODO: need to track how many times a job has been requeued
            for job in failed_queue.get_jobs():
                self.logger.info("Requeued: " + str(job.id))
                failed_queue.requeue(job.id)
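
One way to address the TODO above is to count requeue attempts in job.meta; a sketch under the assumption that meta persists through job.save(), as in classic rq (the method name and cutoff are hypothetical):

    def check_failed_queue_with_limit(self, redis_conn, max_attempts=3):
        with Connection(redis_conn):
            failed_queue = get_failed_queue()
            for job in failed_queue.get_jobs():
                # Hypothetical bookkeeping: stop requeueing a job
                # after max_attempts tries.
                attempts = job.meta.get('requeue_attempts', 0)
                if attempts >= max_attempts:
                    continue
                job.meta['requeue_attempts'] = attempts + 1
                job.save()
                failed_queue.requeue(job.id)
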
Example #38
File: views.py Project: vegten/OIPA
def reschedule_all_failed(request):

    from rq import requeue_job
    from rq import get_failed_queue
    from django_rq import get_connection

    queue = get_failed_queue(get_connection())

    for job in queue.jobs:
        requeue_job(job.id, connection=queue.connection)

    return HttpResponse('Success')
Example #39
File: web.py Project: ducu/rq-dashboard
def overview(queue_name, page):
    if queue_name == 'failed':
        queue = get_failed_queue()
    elif queue_name is None:
        # Show the failed queue by default if it contains any jobs
        failed = get_failed_queue()
        if not failed.is_empty():
            queue = failed
        else:
            queue = Queue()
    else:
        queue = Queue(queue_name)
    r = make_response(render_template(
        'rq_dashboard/dashboard.html',
        workers=Worker.all(),
        queue=queue,
        page=page,
        queues=get_all_queues(),
        rq_url_prefix=url_for('.overview')
    ))
    r.headers.set('Cache-Control', 'no-store')
    return r
Example #40
def test_retry_worker_as_only_worker():
    q = Queue()
    rw = RetryWorker([q], retry_config=dict(
        max_tries=4, maint_interval=0, delays=[]))
    job = q.enqueue(error_fun)

    for _ in range(1, 20):
        rw.work(burst=True)

    job.refresh()
    assert q.count == 0
    assert get_failed_queue().count == 0
    assert Queue('dead_letter_queue').count == 1
    assert job.meta['tries'] == 4
Example #41
def test_no_rq_scheduler_falls_back_to_immediate_retry():
    q = Queue()
    q2 = Queue('not_used')

    w = Worker([q])
    rw = get_retry_worker()([q2], retry_config=dict(
        max_tries=2, retry_delays=[1], maint_interval=0))

    # run job that will fail
    job = q.enqueue(error_fun)
    w.work(burst=True)
    assert q.count == 0
    assert get_failed_queue().count == 1

    # run retry worker
    rw.work(burst=True)

    # job should be requeued since rq_scheduler cannot be imported
    assert q.count == 1
    assert get_failed_queue().count == 0
    assert Queue('dead_letter_queue').count == 0
    job.refresh()
    assert job.meta['tries'] == 2
Example #42
def requeue():
    """Requeue all failed jobs in failed queue"""
    failed_queue = get_failed_queue()
    job_ids = failed_queue.job_ids
    click.echo('Requeue failed jobs: {}'.format(len(job_ids)))
    requeue_failed_num = 0
    with click.progressbar(job_ids) as job_bar:
        for job_id in job_bar:
            try:
                failed_queue.requeue(job_id)
            except InvalidJobOperationError:
                requeue_failed_num += 1

    click.secho('Requeue failed: {}'.format(
        requeue_failed_num), fg='red')
Example #43
 def get_context_data(self, **kwargs):
     ctx = super(JobDetails, self).get_context_data(**kwargs)
     try:
         job = Job.fetch(self.kwargs['job'], connection=self.connection)
     except NoSuchJobError:
         raise Http404
     if job.is_failed:
         queue = get_failed_queue(connection=self.connection)
     else:
         queue = Queue(job.origin, connection=self.connection)
     ctx.update({
         'job': serialize_job(job),
         'queue': queue,
         'title': _('Job %s') % job.id,
     })
     return ctx
Example #44
File: test_queue.py Project: bradleyy/rq
    def test_requeue_job(self):
        """Requeueing existing jobs."""
        job = Job.create(func=div_by_zero, args=(1, 2, 3))
        job.origin = 'fake'
        job.save()
        get_failed_queue().quarantine(job, Exception('Some fake error'))  # noqa

        self.assertEqual(Queue.all(), [get_failed_queue()])  # noqa
        self.assertEquals(get_failed_queue().count, 1)

        get_failed_queue().requeue(job.id)

        self.assertEquals(get_failed_queue().count, 0)
        self.assertEquals(Queue('fake').count, 1)
Example #45
def requeue_job_view(job_id):

    # Just try both failed queues... don't care about efficiency for single job retries
    timeout_queue = RqTimeoutQueue()
    failed_queue = get_failed_queue()

    try:
        failed_queue.requeue(job_id)
    except:
        pass
    try:
        timeout_queue.requeue(job_id)
    except:
        pass

    return dict(status='OK')
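
A slightly tighter variant of the same view would catch only the errors requeue can actually raise rather than using bare excepts (exception names from rq.exceptions; the custom RqTimeoutQueue is assumed to behave like the failed queue here):

    try:
        failed_queue.requeue(job_id)
    except (NoSuchJobError, InvalidJobOperationError):
        pass
    try:
        timeout_queue.requeue(job_id)
    except (NoSuchJobError, InvalidJobOperationError):
        pass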