Code example #1
File: test_registry.py Project: nvie/rq
    def test_default_failure_ttl(self):
        """Job TTL defaults to DEFAULT_FAILURE_TTL"""
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(say_hello)

        registry = FailedJobRegistry(connection=self.testconn)
        key = registry.key

        timestamp = current_timestamp()
        registry.add(job)
        self.assertLess(
            self.testconn.zscore(key, job.id),
            timestamp + DEFAULT_FAILURE_TTL + 2
        )
        self.assertGreater(
            self.testconn.zscore(key, job.id),
            timestamp + DEFAULT_FAILURE_TTL - 2
        )

        timestamp = current_timestamp()
        ttl = 5
        registry.add(job, ttl=5)
        self.assertLess(
            self.testconn.zscore(key, job.id),
            timestamp + ttl + 2
        )
        self.assertGreater(
            self.testconn.zscore(key, job.id),
            timestamp + ttl - 2
        )
Code example #2
File: views.py Project: tom-price/django-rq
def failed_jobs(request, queue_index):
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)

    registry = FailedJobRegistry(queue.name, queue.connection)

    items_per_page = 100
    num_jobs = len(registry)
    page = int(request.GET.get('page', 1))
    jobs = []

    if num_jobs > 0:
        last_page = int(ceil(num_jobs / items_per_page))
        page_range = range(1, last_page + 1)
        offset = items_per_page * (page - 1)
        job_ids = registry.get_job_ids(offset, offset + items_per_page - 1)
        jobs = get_jobs(queue, job_ids, registry)

    else:
        page_range = []

    context_data = {
        'queue': queue,
        'queue_index': queue_index,
        'jobs': jobs,
        'num_jobs': num_jobs,
        'page': page,
        'page_range': page_range,
        'job_status': 'Failed',
    }
    return render(request, 'django_rq/jobs.html', context_data)
Code example #3
File: rediz.py Project: jpobeda/netpalm
    def getfailedjobs(self, q):
        try:
            registry = FailedJobRegistry(q, connection=self.base_connection)
            response_object = registry.get_job_ids()
            return response_object
        except Exception as e:
            return e
Code example #4
def requeue(cli_config, queue, all, job_class, job_ids, **options):
    """Requeue failed jobs."""

    failed_job_registry = FailedJobRegistry(queue,
                                            connection=cli_config.connection)
    if all:
        job_ids = failed_job_registry.get_job_ids()

    if not job_ids:
        click.echo('Nothing to do')
        sys.exit(0)

    click.echo('Requeueing {0} jobs from failed queue'.format(len(job_ids)))
    fail_count = 0
    with click.progressbar(job_ids) as job_ids:
        for job_id in job_ids:
            try:
                failed_job_registry.requeue(job_id)
            except InvalidJobOperationError:
                fail_count += 1

    if fail_count > 0:
        click.secho(
            'Unable to requeue {0} jobs from failed job registry'.format(
                fail_count),
            fg='red')
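For reference, the same requeue loop can be driven directly from Python rather than through this CLI command; a minimal sketch, assuming a local Redis and a queue named 'default' (these names are illustrative, not from the source):

from redis import Redis
from rq.exceptions import InvalidJobOperationError
from rq.registry import FailedJobRegistry

# Assumes a local Redis instance and a queue named 'default'.
registry = FailedJobRegistry('default', connection=Redis())
for job_id in registry.get_job_ids():
    try:
        registry.requeue(job_id)  # puts the job back on its originating queue
    except InvalidJobOperationError:
        pass  # job has already left the registry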
Code example #5
def check_scrape_job(scrape_id: str, scraper: Scraper):
    from karim import telegram_bot as bot
    failed = FailedJobRegistry(queue=queue)

    if scrape_id in failed.get_job_ids():
        # job failed
        bot.send_message(scraper.get_user_id(), failed_scraping_ig_text)
        sheet.log(datetime.utcnow(),
                  scraper.get_user_id(),
                  action='FAILED SCRAPE')
        return False
    else:
        redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
        conn = redis.from_url(redis_url)
        job = Job.fetch(scrape_id, connection=conn)
        result = job.result

        # Save result in sheets
        sheet.add_scrape(scraper.get_target(),
                         name=scraper.get_name(),
                         scraped=result)
        sheet.log(datetime.utcnow(),
                  scraper.get_user_id(),
                  action='SUCCESSFUL SCRAPE')
        # Update user
        markup = InlineKeyboardMarkup([[
            InlineKeyboardButton(text='Google Sheets',
                                 url=sheet.get_sheet_url(1))
        ]])
        bot.send_message(scraper.get_user_id(),
                         finished_scrape_text,
                         reply_markup=markup)
        return True
Code example #6
File: views.py Project: Yolley/django-rq
def requeue_all(request, queue_index):
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)
    registry = FailedJobRegistry(queue=queue)

    if request.method == 'POST':
        job_ids = registry.get_job_ids()
        count = 0
        # Confirmation received
        for job_id in job_ids:
            try:
                requeue_job(job_id, connection=queue.connection)
                count += 1
            except NoSuchJobError:
                pass

        messages.info(request,
                      'You have successfully requeued %d jobs!' % count)
        return redirect('rq_jobs', queue_index)

    context_data = {
        'queue_index': queue_index,
        'queue': queue,
        'total_jobs': len(registry),
    }

    return render(request, 'django_rq/requeue_all.html', context_data)
Code example #7
    def fail_dependents(self, job, pipeline=None):
        """Fails all jobs in the given job's dependents set and clears it.

        When called without a pipeline, this method uses WATCH/MULTI/EXEC.
        If you pass a pipeline, only MULTI is called. The rest is up to the
        caller.
        """
        from .registry import DeferredJobRegistry, FailedJobRegistry

        pipe = pipeline if pipeline is not None else self.connection.pipeline()
        dependents_key = job.dependents_key

        while True:
            try:
                # if a pipeline is passed, the caller is responsible for calling WATCH
                # to ensure all jobs are enqueued
                if pipeline is None:
                    pipe.watch(dependents_key)

                dependent_job_ids = [as_text(_id)
                                     for _id in pipe.smembers(dependents_key)]

                jobs_to_fail = self.job_class.fetch_many(
                    dependent_job_ids,
                    connection=self.connection
                )

                pipe.multi()

                for dependent in jobs_to_fail:
                    deferred_job_registry = DeferredJobRegistry(dependent.origin,
                                                                self.connection,
                                                                job_class=self.job_class)
                    deferred_job_registry.remove(dependent, pipeline=pipe)

                    dependent.set_status(JobStatus.FAILED, pipeline=pipe)

                    failed_job_registry = FailedJobRegistry(dependent.origin, dependent.connection,
                                                            job_class=self.job_class)
                    failed_job_registry.add(dependent, ttl=dependent.failure_ttl,
                                            exc_string="Dependency has failed!", pipeline=pipe)

                    self.fail_dependents(job=dependent)

                pipe.delete(dependents_key)

                if pipeline is None:
                    pipe.execute()

                break
            except WatchError:
                if pipeline is None:
                    continue
                else:
                    # if the pipeline comes from the caller, we re-raise the
                    # exception, as it is the responsibility of the caller to
                    # handle it
                    raise
        return len(dependent_job_ids)
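To make the WATCH/MULTI/EXEC contract in the docstring above concrete, here is a minimal usage sketch; the `queue` and `job` names are assumed (an instance of the class defining fail_dependents, and an rq Job with dependents), not part of the source:

# Standalone call: the method manages WATCH/MULTI/EXEC and retries on
# WatchError internally.
queue.fail_dependents(job)

# Caller-managed pipeline: the method only issues MULTI, so the caller
# must WATCH the dependents key, execute the pipeline, and handle any
# WatchError itself.
with queue.connection.pipeline() as pipe:
    pipe.watch(job.dependents_key)
    queue.fail_dependents(job, pipeline=pipe)
    pipe.execute()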
Code example #8
File: redisqueue.py Project: splogamurugan/SIP
    def requeue_all(self):
        with Connection(redis.from_url(self.url)):
            q = Queue()
            reg = FailedJobRegistry()
            tasks = reg.get_job_ids()
            for task in tasks:
                print(task)
                requeue_job(task, q.connection)
Code example #9
    def getFailedExperiments(cls):
        with Connection(redis.from_url(
                current_app.config['REDIS_URL'])) as conn:
            registry = FailedJobRegistry('default', connection=conn)
            return [
                Job.fetch(id, connection=conn)
                for id in registry.get_job_ids()
            ]
Code example #10
File: test_registry.py Project: nvie/rq
    def test_invalid_job(self):
        """Requeuing a job that's not in FailedJobRegistry raises an error."""
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(say_hello)

        registry = FailedJobRegistry(connection=self.testconn)
        with self.assertRaises(InvalidJobOperation):
            registry.requeue(job)
Code example #11
File: test_registry.py Project: zachgoulet/rq
    def test_invalid_job(self):
        """Requeuing a job that's not in FailedJobRegistry raises an error."""
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(say_hello)

        registry = FailedJobRegistry(connection=self.testconn)
        with self.assertRaises(InvalidJobOperation):
            registry.requeue(job)
Code example #12
    def getfailedjobs(self, q):
        """Return the IDs of failed redis jobs"""
        log.info(f"getting failed jobs: {q}")
        try:
            registry = FailedJobRegistry(q, connection=self.base_connection)
            response_object = registry.get_job_ids()
            return response_object
        except Exception as e:
            return e
Code example #13
File: jobs.py Project: apentecoste/app-template
    def get(self):
        pending = queue.get_job_ids()

        registry = StartedJobRegistry('default', connection=broker)
        started = registry.get_job_ids()

        fail_queue = FailedJobRegistry(connection=broker)
        failed = fail_queue.get_job_ids()

        return {"jobs": started + pending, "failed": failed}
Code example #14
def index():
    q_len = len(q)
    jobs = q.jobs

    registry_failed = FailedJobRegistry(queue=q)
    failed_jobs = []
    for job_id in registry_failed.get_job_ids():
        failed_jobs.append(q.fetch_job(job_id))

    return render_template("index.html", jobs=jobs, q_len=q_len, failed_jobs=failed_jobs,
                           failed_len=registry_failed.count)
Code example #15
def check_failed_rq_jobs(queue_name='monitoring_tasks', delete_job=False):
    """This function will print out jobs that failed to execute on RQ 's task queue"""
    queue = Queue(connection=app.redis, name=queue_name)
    registry = FailedJobRegistry(queue=queue)
    # This is how to remove a job from a registry
    for job_id in registry.get_job_ids():
        # Get job whose ID is given
        job = Job.fetch(job_id, connection=app.redis)
        # Print out the job's exception stacktrace
        system_logging(f'\n{job.__dict__["exc_info"]}\n------------------------------------------\n', True, 'redis.log')
        # Remove from registry and delete job
        registry.remove(job_id, delete_job=delete_job)
Code example #16
def check_dm_job(identifier: str, forwarder: Forwarder):
    print('TELEBOT: Check DM Job Initiated')
    from karim import telegram_bot as bot
    failed = FailedJobRegistry(queue=queue)

    count = 0
    for id in failed.get_job_ids():
        if identifier in id and DM in id:
            count += 1

    bot.send_message(forwarder.get_user_id(),
                     finished_sending_dm_text.format(count))
    return True
Code example #17
def _get_failed_jobs(connection):
    queues = Queue.all(connection=connection)
    failed_jobs = []

    for q in queues:
        registry = FailedJobRegistry(q.name, connection=connection)

        job_ids = registry.get_job_ids()

        for id in job_ids:
            j = rq_job.Job.fetch(id, connection=connection)
            failed_jobs.append(_create_failed_job_obj(j))

    return failed_jobs
Code example #18
File: test_registry.py Project: nvie/rq
    def test_requeue(self):
        """FailedJobRegistry.requeue works properly"""
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(div_by_zero, failure_ttl=5)

        worker = Worker([queue])
        worker.work(burst=True)

        registry = FailedJobRegistry(connection=worker.connection)
        self.assertTrue(job in registry)

        registry.requeue(job.id)
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)

        worker.work(burst=True)
        self.assertTrue(job in registry)

        # Should also work with job instance
        registry.requeue(job)
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)

        worker.work(burst=True)
        self.assertTrue(job in registry)

        # requeue_job should work the same way
        requeue_job(job.id, connection=self.testconn)
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)

        worker.work(burst=True)
        self.assertTrue(job in registry)

        # And so does job.requeue()
        job.requeue()
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)
Code example #19
def navbar_stats():
    """
    Retrieve object counts used for the navbar
    """
    # Check cache first
    redis = get_redis_connection()
    result = redis.get("navbar_stats")

    if result:
        result = json.loads(result)
        return jsonify(result)

    queues = (get_queue(QueueType.DOWNLOAD_OBJECT),
              get_queue(QueueType.CREATE_SIP), get_queue(QueueType.SUBMIT_SIP),
              get_queue(QueueType.CONFIRM_SIP))
    result = {"queues": {}}
    for queue in queues:
        result["queues"][queue.name] = {
            "pending": queue.count,
            "processing": StartedJobRegistry(queue=queue).count
        }

    # Add failed
    result["failed"] = sum(
        [FailedJobRegistry(queue=queue).count for queue in queues])

    # Cache result for 2 seconds
    redis.set("navbar_stats", json.dumps(result), ex=2)
    return jsonify(result)
Code example #20
def purge_failed_jobs():
    with Connection(rq_redis_connection):
        queues = [
            q for q in Queue.all() if q.name not in default_operational_queues
        ]
        for queue in queues:
            failed_job_ids = FailedJobRegistry(queue=queue).get_job_ids()
            failed_jobs = Job.fetch_many(failed_job_ids, rq_redis_connection)
            stale_jobs = []
            for failed_job in failed_jobs:
                # the job may not actually exist anymore in Redis
                if not failed_job:
                    continue
                # the job could have an empty ended_at value in case
                # of a worker dying before it can save the ended_at value,
                # in which case we also consider it stale
                if not failed_job.ended_at:
                    stale_jobs.append(failed_job)
                elif (datetime.utcnow() - failed_job.ended_at
                      ).total_seconds() > settings.JOB_DEFAULT_FAILURE_TTL:
                    stale_jobs.append(failed_job)

            for stale_job in stale_jobs:
                stale_job.delete()

            if stale_jobs:
                logger.info(
                    "Purged %d old failed jobs from the %s queue.",
                    len(stale_jobs),
                    queue.name,
                )
Code example #21
File: test_worker.py Project: z-hermit/rq
    def test_self_modification_persistence_with_error(self):
        """Make sure that any meta modification done by
        the job itself persists completely through the
        queue/worker/job stack -- even if the job errored"""
        q = Queue()
        # Also make sure that previously existing metadata
        # persists properly
        job = q.enqueue(modify_self_and_error,
                        meta={
                            'foo': 'bar',
                            'baz': 42
                        },
                        args=[{
                            'baz': 10,
                            'newinfo': 'waka'
                        }])

        w = Worker([q])
        w.work(burst=True)

        # Postconditions
        self.assertEqual(q.count, 0)
        failed_job_registry = FailedJobRegistry(queue=q)
        self.assertTrue(job in failed_job_registry)
        self.assertEqual(w.get_current_job_id(), None)

        job_check = Job.fetch(job.id)
        self.assertEqual(set(job_check.meta.keys()),
                         set(['foo', 'baz', 'newinfo']))
        self.assertEqual(job_check.meta['foo'], 'bar')
        self.assertEqual(job_check.meta['baz'], 10)
        self.assertEqual(job_check.meta['newinfo'], 'waka')
Code example #22
def serialize_queues(instance_number, queues):
    return [
        dict(
            name=q.name,
            count=q.count,
            queued_url=url_for(
                ".jobs_overview",
                instance_number=instance_number,
                queue_name=q.name,
                registry_name="queued",
                per_page="8",
                page="1",
            ),
            failed_job_registry_count=FailedJobRegistry(q.name).count,
            failed_url=url_for(
                ".jobs_overview",
                instance_number=instance_number,
                queue_name=q.name,
                registry_name="failed",
                per_page="8",
                page="1",
            ),
            started_job_registry_count=StartedJobRegistry(q.name).count,
            started_url=url_for(
                ".jobs_overview",
                instance_number=instance_number,
                queue_name=q.name,
                registry_name="started",
                per_page="8",
                page="1",
            ),
            scheduled_job_registry_count=ScheduledJobRegistry(q.name).count,
            scheduled_url=url_for(
                ".jobs_overview",
                instance_number=instance_number,
                queue_name=q.name,
                registry_name="scheduled",
                per_page="8",
                page="1",
            ),
            deferred_job_registry_count=DeferredJobRegistry(q.name).count,
            deferred_url=url_for(
                ".jobs_overview",
                instance_number=instance_number,
                queue_name=q.name,
                registry_name="deferred",
                per_page="8",
                page="1",
            ),
            finished_job_registry_count=FinishedJobRegistry(q.name).count,
            finished_url=url_for(
                ".jobs_overview",
                instance_number=instance_number,
                queue_name=q.name,
                registry_name="finished",
                per_page="8",
                page="1",
            ),
        ) for q in queues
    ]
Code example #23
File: test_worker.py Project: naga-chandana/rq
    def test_handle_retry(self):
        """handle_job_failure() handles retry properly"""
        connection = self.testconn
        queue = Queue(connection=connection)
        retry = Retry(max=2)
        job = queue.enqueue(div_by_zero, retry=retry)
        registry = FailedJobRegistry(queue=queue)

        worker = Worker([queue])

        # If a job is configured to retry, it will be put back in the queue
        # and not put in the FailedJobRegistry.
        # This is the original execution
        queue.empty()
        worker.handle_job_failure(job, queue)
        job.refresh()
        self.assertEqual(job.retries_left, 1)
        self.assertEqual([job.id], queue.job_ids)
        self.assertFalse(job in registry)

        # First retry
        queue.empty()
        worker.handle_job_failure(job, queue)
        job.refresh()
        self.assertEqual(job.retries_left, 0)
        self.assertEqual([job.id], queue.job_ids)

        # Second retry
        queue.empty()
        worker.handle_job_failure(job, queue)
        job.refresh()
        self.assertEqual(job.retries_left, 0)
        self.assertEqual([], queue.job_ids)
        # If a job has no retries left, it's put in FailedJobRegistry
        self.assertTrue(job in registry)
Code example #24
def serialize_queues(queues):
    return [
        dict(
            name=q.name,
            count=q.count,
            queued_url=url_for('.overview',
                               content_name='jobs',
                               queue_name=q.name),
            failed_job_registry_count=FailedJobRegistry(q.name).count,
            failed_url=url_for('.overview',
                               content_name='jobs',
                               queue_name=q.name,
                               registry_name='failed'),
            started_job_registry_count=StartedJobRegistry(q.name).count,
            started_url=url_for('.overview',
                                content_name='jobs',
                                queue_name=q.name,
                                registry_name='started'),
            deferred_job_registry_count=DeferredJobRegistry(q.name).count,
            deferred_url=url_for('.overview',
                                 content_name='jobs',
                                 queue_name=q.name,
                                 registry_name='deferred'),
            finished_job_registry_count=FinishedJobRegistry(q.name).count,
            finished_url=url_for('.overview',
                                 content_name='jobs',
                                 queue_name=q.name,
                                 registry_name='finished'),
        ) for q in queues
    ]
Code example #25
def empty_queue(queue_name, registry_name):
    if registry_name == "queued":
        q = Queue(queue_name)
        q.empty()
    elif registry_name == "failed":
        ids = FailedJobRegistry(queue_name).get_job_ids()
        for id in ids:
            delete_job_view(id)
    elif registry_name == "deferred":
        ids = DeferredJobRegistry(queue_name).get_job_ids()
        for id in ids:
            delete_job_view(id)
    elif registry_name == "started":
        ids = StartedJobRegistry(queue_name).get_job_ids()
        for id in ids:
            delete_job_view(id)
    elif registry_name == "scheduled":
        ids = ScheduledJobRegistry(queue_name).get_job_ids()
        for id in ids:
            delete_job_view(id)
    elif registry_name == "finished":
        ids = FinishedJobRegistry(queue_name).get_job_ids()
        for id in ids:
            delete_job_view(id)
    return dict(status="OK")
Code example #26
File: model.py Project: mabounassif/label-studio
    def is_training(cls, project):
        if not cls.has_active_model(project):
            return {'is_training': False}
        m = cls.get(project)
        if cls.without_redis():
            return {
                'is_training': m.is_training,
                'backend': 'none',
                'model_version': m.model_version
            }
        else:
            started_jobs = StartedJobRegistry(cls._redis_queue.name, cls._redis_queue.connection).get_job_ids()
            finished_jobs = FinishedJobRegistry(cls._redis_queue.name, cls._redis_queue.connection).get_job_ids()
            failed_jobs = FailedJobRegistry(cls._redis_queue.name, cls._redis_queue.connection).get_job_ids()
            running_jobs = list(set(started_jobs) - set(finished_jobs + failed_jobs))
            logger.debug('Running jobs: ' + str(running_jobs))
            for job_id in running_jobs:
                job = Job.fetch(job_id, connection=cls._redis)
                if job.meta.get('project') == project:
                    return {
                        'is_training': True,
                        'job_id': job_id,
                        'backend': 'redis',
                        'model_version': m.model_version,
                    }
            return {
                'is_training': False,
                'backend': 'redis',
                'model_version': m.model_version
            }
Code example #27
File: routes.py Project: iyuershov/fedresurs-parser
def get_task_list():
    tasks = []
    registry = StartedJobRegistry('default', connection=connection)
    job_ids = registry.get_job_ids()
    for job_id in job_ids:
        j = Job.fetch(job_id, connection=connection)
        job_info = dict(guid=j.id, status=j.get_status())
        tasks.append(job_info)

    registry = FinishedJobRegistry('default', connection=connection)
    job_ids = registry.get_job_ids()
    for job_id in job_ids:
        j = Job.fetch(job_id, connection=connection)
        job_info = dict(guid=j.id, status=j.get_status())
        tasks.append(job_info)

    registry = FailedJobRegistry('default', connection=connection)
    job_ids = registry.get_job_ids()
    for job_id in job_ids:
        j = Job.fetch(job_id, connection=connection)
        job_info = dict(guid=j.id, status=j.get_status())
        tasks.append(job_info)

    return jsonify({
        "tasks": tasks
    }), 201
Code example #28
    def test_failed_jobs(self):
        """Ensure that failed jobs page works properly."""
        queue = get_queue('django_rq_test')
        queue_index = get_queue_index('django_rq_test')

        # Test that page doesn't fail when FailedJobRegistry is empty
        response = self.client.get(
            reverse('rq_failed_jobs', args=[queue_index]))
        self.assertEqual(response.status_code, 200)

        job = queue.enqueue(access_self)
        registry = FailedJobRegistry(queue.name, queue.connection)
        registry.add(job, 2)
        response = self.client.get(
            reverse('rq_failed_jobs', args=[queue_index]))
        self.assertEqual(response.context['jobs'], [job])
Code example #29
File: test_worker.py Project: naga-chandana/rq
    def test_horse_fails(self):
        """Tests that job status is set to FAILED even if horse unexpectedly fails"""
        q = Queue()
        self.assertEqual(q.count, 0)

        # Action
        job = q.enqueue(say_hello)
        self.assertEqual(q.count, 1)

        # keep for later
        enqueued_at_date = str(job.enqueued_at)

        w = Worker([q])
        with mock.patch.object(w, 'perform_job', new_callable=raise_exc_mock):
            w.work(burst=True)  # should silently pass

        # Postconditions
        self.assertEqual(q.count, 0)
        failed_job_registry = FailedJobRegistry(queue=q)
        self.assertTrue(job in failed_job_registry)
        self.assertEqual(w.get_current_job_id(), None)

        # Check the job
        job = Job.fetch(job.id)
        self.assertEqual(job.origin, q.name)

        # Should be the original enqueued_at date, not the date of enqueueing
        # to the failed queue
        self.assertEqual(str(job.enqueued_at), enqueued_at_date)
        self.assertTrue(job.exc_info)  # should contain exc_info
Code example #30
    def test_work_is_unreadable(self):
        """Unreadable jobs are put on the failed job registry."""
        q = Queue()
        self.assertEqual(q.count, 0)

        # NOTE: We have to fake this enqueueing for this test case.
        # What we're simulating here is a call to a function that is not
        # importable from the worker process.
        job = Job.create(func=div_by_zero, args=(3,), origin=q.name)
        job.save()

        job_data = job.data
        invalid_data = job_data.replace(b'div_by_zero', b'nonexisting')
        assert job_data != invalid_data
        self.testconn.hset(job.key, 'data', zlib.compress(invalid_data))

        # We use the low-level internal function to enqueue any data (bypassing
        # validity checks)
        q.push_job_id(job.id)

        self.assertEqual(q.count, 1)

        # All set, we're going to process it
        w = Worker([q])
        w.work(burst=True)   # should silently pass
        self.assertEqual(q.count, 0)

        failed_job_registry = FailedJobRegistry(queue=q)
        self.assertTrue(job in failed_job_registry)
Code example #31
    def test_work_fails(self):
        """Failing jobs are put on the failed queue."""
        q = Queue()
        self.assertEqual(q.count, 0)

        # Action
        job = q.enqueue(div_by_zero)
        self.assertEqual(q.count, 1)

        # keep for later
        enqueued_at_date = str(job.enqueued_at)

        w = Worker([q])
        w.work(burst=True)  # should silently pass

        # Postconditions
        self.assertEqual(q.count, 0)
        failed_job_registry = FailedJobRegistry(queue=q)
        self.assertTrue(job in failed_job_registry)
        self.assertEqual(w.get_current_job_id(), None)

        # Check the job
        job = Job.fetch(job.id)
        self.assertEqual(job.origin, q.name)

        # Should be the original enqueued_at date, not the date of enqueueing
        # to the failed queue
        self.assertEqual(str(job.enqueued_at), enqueued_at_date)
        self.assertTrue(job.exc_info)  # should contain exc_info
Code example #32
File: test_worker.py Project: mbarkhau/rq
    def test_work_horse_force_death(self):
        """Simulate a frozen worker that doesn't observe the timeout properly.
        Fake it by artificially setting the timeout of the parent process to
        something much smaller after the process is already forked.
        """
        fooq = Queue('foo')
        self.assertEqual(fooq.count, 0)
        w = Worker(fooq)
        sentinel_file = '/tmp/.rq_sentinel_work_horse_death'
        if os.path.exists(sentinel_file):
            os.remove(sentinel_file)
        fooq.enqueue(create_file_after_timeout, sentinel_file, 100)
        job, queue = w.dequeue_job_and_maintain_ttl(5)
        w.fork_work_horse(job, queue)
        job.timeout = 5
        w.job_monitoring_interval = 1
        now = utcnow()
        w.monitor_work_horse(job)
        fudge_factor = 1
        total_time = w.job_monitoring_interval + 65 + fudge_factor
        self.assertTrue((utcnow() - now).total_seconds() < total_time)
        self.assertEqual(job.get_status(), JobStatus.FAILED)
        failed_job_registry = FailedJobRegistry(queue=fooq)
        self.assertTrue(job in failed_job_registry)
        self.assertEqual(fooq.count, 0)
Code example #33
File: redisqueue.py Project: splogamurugan/SIP
    def stats(self):

        queue_data = {}
        workers = self.workers()
        queued = self.queued_jobs()

        with Connection(redis.from_url(self.url)):
            q = Queue()
            q.connection
            finished_job_registry = FinishedJobRegistry()
            started_jobs_registry = StartedJobRegistry()
            deferred_jobs_registry = DeferredJobRegistry()
            failed_jobs_registry = FailedJobRegistry()
            worker = Worker(['default'])

            queue_data['finished_jobs'] = len(finished_job_registry)

            queue_data['started_jobs'] = len(started_jobs_registry)
            queue_data['deferred_jobs'] = len(deferred_jobs_registry)
            queue_data['failed_jobs'] = len(failed_jobs_registry)
            queue_data['workers'] = len(workers)
            queue_data['queued_jobs'] = len(queued)
            queue_data['active_jobs'] = queue_data[
                'started_jobs'] + queue_data['queued_jobs']

        return queue_data
Code example #34
File: test_registry.py Project: nvie/rq
    def test_worker_handle_job_failure(self):
        """Failed jobs are added to FailedJobRegistry"""
        q = Queue(connection=self.testconn)

        w = Worker([q])
        registry = FailedJobRegistry(connection=w.connection)

        timestamp = current_timestamp()

        job = q.enqueue(div_by_zero, failure_ttl=5)
        w.handle_job_failure(job)
        # job is added to FailedJobRegistry with default failure ttl
        self.assertIn(job.id, registry.get_job_ids())
        self.assertLess(self.testconn.zscore(registry.key, job.id),
                        timestamp + DEFAULT_FAILURE_TTL + 5)

        # job is added to FailedJobRegistry with specified ttl
        job = q.enqueue(div_by_zero, failure_ttl=5)
        w.handle_job_failure(job)
        self.assertLess(self.testconn.zscore(registry.key, job.id),
                        timestamp + 7)
Code example #35
File: cli.py Project: nvie/rq
def requeue(cli_config, queue, all, job_class, job_ids, **options):
    """Requeue failed jobs."""

    failed_job_registry = FailedJobRegistry(queue,
                                            connection=cli_config.connection)
    if all:
        job_ids = failed_job_registry.get_job_ids()

    if not job_ids:
        click.echo('Nothing to do')
        sys.exit(0)

    click.echo('Requeueing {0} jobs from failed queue'.format(len(job_ids)))
    fail_count = 0
    with click.progressbar(job_ids) as job_ids:
        for job_id in job_ids:
            try:
                failed_job_registry.requeue(job_id)
            except InvalidJobOperationError:
                fail_count += 1

    if fail_count > 0:
        click.secho('Unable to requeue {0} jobs from failed job registry'.format(fail_count), fg='red')
Code example #36
File: views.py Project: ui/django-rq
def failed_jobs(request, queue_index):
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)

    registry = FailedJobRegistry(queue.name, queue.connection)

    items_per_page = 100
    num_jobs = len(registry)
    page = int(request.GET.get('page', 1))
    jobs = []

    if num_jobs > 0:
        last_page = int(ceil(num_jobs / items_per_page))
        page_range = range(1, last_page + 1)
        offset = items_per_page * (page - 1)
        job_ids = registry.get_job_ids(offset, offset + items_per_page - 1)

        for job_id in job_ids:
            try:
                jobs.append(Job.fetch(job_id, connection=queue.connection))
            except NoSuchJobError:
                pass

    else:
        page_range = []

    context_data = {
        'queue': queue,
        'queue_index': queue_index,
        'jobs': jobs,
        'num_jobs': num_jobs,
        'page': page,
        'page_range': page_range,
        'job_status': 'Failed',
    }
    return render(request, 'django_rq/jobs.html', context_data)
Code example #37
File: test_job.py Project: nvie/rq
    def test_job_delete_removes_itself_from_registries(self):
        """job.delete() should remove itself from job registries"""
        connection = self.testconn
        job = Job.create(func=fixtures.say_hello, status=JobStatus.FAILED,
                         connection=self.testconn, origin='default')
        job.save()
        registry = FailedJobRegistry(connection=self.testconn)
        registry.add(job, 500)

        job.delete()
        self.assertFalse(job in registry)

        job = Job.create(func=fixtures.say_hello, status=JobStatus.FINISHED,
                         connection=self.testconn, origin='default')
        job.save()

        registry = FinishedJobRegistry(connection=self.testconn)
        registry.add(job, 500)

        job.delete()
        self.assertFalse(job in registry)

        job = Job.create(func=fixtures.say_hello, status=JobStatus.STARTED,
                         connection=self.testconn, origin='default')
        job.save()

        registry = StartedJobRegistry(connection=self.testconn)
        registry.add(job, 500)

        job.delete()
        self.assertFalse(job in registry)

        job = Job.create(func=fixtures.say_hello, status=JobStatus.DEFERRED,
                         connection=self.testconn, origin='default')
        job.save()

        registry = DeferredJobRegistry(connection=self.testconn)
        registry.add(job, 500)

        job.delete()
        self.assertFalse(job in registry)