def test_enqueue_autocommit_on(self):
    """
    Running ``enqueue`` when AUTOCOMMIT is on should immediately persist job
    into Redis.
    """
    queue = get_queue()
    job = queue.enqueue(divide, 1, 1)
    # assertIn reports the container contents on failure, unlike
    # assertTrue(x in y) which only prints "False is not true".
    self.assertIn(job.id, queue.job_ids)
    job.delete()
def test_scheduler_default_result_ttl(self):
    """
    Ensure scheduler respects DEFAULT_RESULT_TTL value for `result_ttl` param.
    """
    scheduler = get_scheduler('test_scheduler')
    job = scheduler.enqueue_at(
        datetime.datetime.now() + datetime.timedelta(days=1), divide, 1, 1)
    # assertIn gives a clearer failure message than assertTrue(x in y).
    self.assertIn(job, scheduler.get_jobs())
    self.assertEqual(job.result_ttl, 5432)
    job.delete()
def flush_registry(registry):
    """Empty *registry*: drop every tracked job id and delete the jobs too."""
    conn = registry.connection
    for jid in registry.get_job_ids():
        # Remove the id from the registry's sorted set first.
        conn.zrem(registry.key, jid)
        try:
            Job.fetch(jid, connection=conn).delete()
        except NoSuchJobError:
            # The job hash is already gone; removing the registry
            # entry above was all that remained to do.
            pass
def test_get_current_job(self):
    """
    Ensure that functions using RQ's ``get_current_job`` don't fail
    when run from rqworker (the job id is not in the failed queue).
    """
    queue = get_queue()
    job = queue.enqueue(access_self)
    call_command('rqworker', burst=True)
    failed_queue = Queue(name='failed', connection=queue.connection)
    # assertNotIn reports the queue contents on failure, unlike
    # assertFalse(x in y).
    self.assertNotIn(job.id, failed_queue.job_ids)
    job.delete()
def test_get_current_job(self):
    """
    Ensure that functions using RQ's ``get_current_job`` don't fail
    when run from rqworker (the job id is not in the failed queue).
    """
    queue = get_queue()
    job = queue.enqueue(access_self)
    call_command('rqworker', '--burst')
    failed_queue = Queue(name='failed', connection=queue.connection)
    # assertNotIn reports the queue contents on failure, unlike
    # assertFalse(x in y).
    self.assertNotIn(job.id, failed_queue.job_ids)
    job.delete()
def delete_job(request, queue_index, job_id):
    """
    Confirm-and-delete view for a single job.

    GET renders a confirmation page; POST removes the job id from the
    queue's Redis list, deletes the job hash, and redirects back to the
    queue's job listing.
    """
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)
    job = Job.fetch(job_id, connection=queue.connection)

    # Check the request method, not the truthiness of request.POST:
    # a POST request whose body carries no form fields must still
    # perform the deletion.
    if request.method == 'POST':
        # Remove job id from queue and delete the actual job
        queue.connection._lrem(queue.key, 0, job.id)
        job.delete()
        messages.info(request, "You have successfully deleted %s" % job.id)
        return redirect("rq_jobs", queue_index)

    context_data = {"queue_index": queue_index, "job": job, "queue": queue}
    return render(request, "django_rq/delete_job.html", context_data)
def test_requeue_job(self):
    """
    Ensure that a failed job gets requeued when rq_requeue_job is called
    """
    def failing_job():
        raise ValueError

    queue = get_queue('default')
    job = queue.enqueue(failing_job)
    # Drain the queue so the job actually runs (and fails).
    get_worker('default').work(burst=True)
    job.refresh()
    self.assertTrue(job.is_failed)

    requeue_url = reverse(
        'rq_requeue_job',
        args=[queue.connection_name, queue.name, job.id],
    )
    self.client.post(requeue_url, {'requeue': 'Requeue'})
    self.assertIn(job, queue.jobs)
    job.delete()
def test_requeue_job(self):
    """
    Ensure that a failed job gets requeued when rq_requeue_job is called
    """
    def failing_job():
        raise ValueError

    queue = get_queue('default')
    failed_index = get_failed_queue_index('default')
    job = queue.enqueue(failing_job)

    # Run the worker once so the job ends up in the failed state.
    worker = get_worker('default')
    worker.work(burst=True)
    job.refresh()
    self.assertTrue(job.is_failed)

    url = reverse('rq_requeue_job', args=[failed_index, job.id])
    self.client.post(url, {'requeue': 'Requeue'})
    self.assertIn(job, queue.jobs)
    job.delete()
def test_requeue_job(self):
    """
    Ensure that a failed job gets requeued when rq_requeue_job is called
    """
    def failing_job():
        raise ValueError

    queue = get_queue("default")
    failed_queue_index = get_failed_queue_index("default")
    enqueued = queue.enqueue(failing_job)

    # Process the queue so the job fails.
    get_worker("default").work(burst=True)
    enqueued.refresh()
    self.assertTrue(enqueued.is_failed)

    requeue_url = reverse("rq_requeue_job", args=[failed_queue_index, enqueued.id])
    self.client.post(requeue_url, {"requeue": "Requeue"})
    self.assertIn(enqueued, queue.jobs)
    enqueued.delete()