def enqueue_at(self, datetime, f, *args, **kwargs):
    """Schedules a job to be enqueued at specified time.

    Creates the job with SCHEDULED status, persists it, and places it on
    this queue's ScheduledJobRegistry with the given datetime as its score.
    Returns the created job.
    """
    from .registry import ScheduledJobRegistry
    # Route caller kwargs (job_timeout, ttl, depends_on, ...) away from the
    # job's own args/kwargs.  NOTE(review): `at_front` is parsed but unused
    # here — scheduled jobs are enqueued later, so front-placement does not
    # apply at schedule time.
    (f, timeout, description, result_ttl, ttl, failure_ttl,
     depends_on, job_id, at_front, meta, args, kwargs) = Queue.parse_args(f, *args, **kwargs)
    job = self.create_job(f, status=JobStatus.SCHEDULED, args=args, kwargs=kwargs, timeout=timeout,
                          result_ttl=result_ttl, ttl=ttl, failure_ttl=failure_ttl,
                          description=description, depends_on=depends_on, job_id=job_id, meta=meta)
    registry = ScheduledJobRegistry(queue=self)
    with self.connection.pipeline() as pipeline:
        # Add Queue key set so workers/monitors can discover this queue even
        # though nothing has been pushed onto it yet
        pipeline.sadd(self.redis_queues_keys, self.key)
        job.save(pipeline=pipeline)
        registry.schedule(job, datetime, pipeline=pipeline)
        pipeline.execute()
    return job
def scheduled_jobs(request, queue_index):
    """Render a paginated listing of the queue's scheduled jobs."""
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)
    registry = ScheduledJobRegistry(queue.name, queue.connection)

    items_per_page = 100
    num_jobs = len(registry)
    page = int(request.GET.get('page', 1))

    if num_jobs > 0:
        last_page = int(ceil(num_jobs / items_per_page))
        page_range = range(1, last_page + 1)
        offset = items_per_page * (page - 1)
        # Registry takes an inclusive [start, end] index pair
        job_ids = registry.get_job_ids(offset, offset + items_per_page - 1)
        jobs = get_jobs(queue, job_ids, registry)
        # Attach the scheduled timestamp so the template can display it
        for job in jobs:
            job.scheduled_at = registry.get_scheduled_time(job)
    else:
        jobs = []
        page_range = []

    context_data = {
        'queue': queue,
        'queue_index': queue_index,
        'jobs': jobs,
        'num_jobs': num_jobs,
        'page': page,
        'page_range': page_range,
        'job_status': 'Scheduled',
    }
    return render(request, 'django_rq/jobs.html', context_data)
def test_schedule(self):
    """Adding job with the correct score to ScheduledJobRegistry"""
    queue = Queue(connection=self.testconn)
    job = Job.create('myfunc', connection=self.testconn)
    job.save()
    registry = ScheduledJobRegistry(queue=queue)
    if PY2:
        # On Python 2, datetime needs to have timezone
        self.assertRaises(ValueError, registry.schedule, job, datetime(2019, 1, 1))
        registry.schedule(job, datetime(2019, 1, 1, tzinfo=utc))
        self.assertEqual(self.testconn.zscore(registry.key, job.id),
                         1546300800)  # 2019-01-01 UTC in Unix timestamp
    else:
        from datetime import timezone
        # If we pass in a datetime with no timezone, `schedule()`
        # assumes local timezone so depending on your local timezone,
        # the timestamp may be different.
        #
        # We need to account for the difference between a timezone
        # with DST active and without DST active. The time.timezone
        # property isn't accurate when time.daylight is non-zero,
        # we'll test both.
        #
        # First, time.daylight == 0 (not in DST).
        # Mock the situation for America/New_York not in DST (UTC - 5):
        # time.timezone = 18000
        # time.daylight = 0
        # time.altzone = 14400
        mock_day = mock.patch('time.daylight', 0)
        mock_tz = mock.patch('time.timezone', 18000)
        mock_atz = mock.patch('time.altzone', 14400)
        with mock_tz, mock_day, mock_atz:
            registry.schedule(job, datetime(2019, 1, 1))
            self.assertEqual(self.testconn.zscore(registry.key, job.id),
                             1546300800 + 18000)  # 2019-01-01 UTC in Unix timestamp

        # Second, time.daylight != 0 (in DST).
        # Mock the situation for America/New_York in DST (UTC - 4):
        # time.timezone = 18000
        # time.daylight = 1
        # time.altzone = 14400
        mock_day = mock.patch('time.daylight', 1)
        mock_tz = mock.patch('time.timezone', 18000)
        mock_atz = mock.patch('time.altzone', 14400)
        with mock_tz, mock_day, mock_atz:
            registry.schedule(job, datetime(2019, 1, 1))
            self.assertEqual(self.testconn.zscore(registry.key, job.id),
                             1546300800 + 14400)  # 2019-01-01 UTC in Unix timestamp

        # Score is always stored in UTC even if datetime is in a different tz
        tz = timezone(timedelta(hours=7))
        job = Job.create('myfunc', connection=self.testconn)
        job.save()
        registry.schedule(job, datetime(2019, 1, 1, 7, tzinfo=tz))
        self.assertEqual(self.testconn.zscore(registry.key, job.id),
                         1546300800)  # 2019-01-01 UTC in Unix timestamp
def test_enqueue_at(self):
    """enqueue_at() creates a job in ScheduledJobRegistry"""
    queue = Queue(connection=self.testconn)
    # Strip microseconds: the registry stores scores as whole-second Unix
    # timestamps, so sub-second precision would make the equality flaky.
    scheduled_time = (datetime.now(utc) + timedelta(seconds=10)).replace(microsecond=0)
    job = queue.enqueue_at(scheduled_time, say_hello)
    registry = ScheduledJobRegistry(queue=queue)
    self.assertIn(job, registry)
    # Bug fix: the original used assertTrue(a, b), which treats the second
    # argument as the failure *message* and never compares the two values.
    self.assertEqual(registry.get_scheduled_time(job), scheduled_time)
def test_enqueue_in(self):
    """queue.enqueue_in() schedules job correctly"""
    queue = Queue(connection=self.testconn)
    registry = ScheduledJobRegistry(queue=queue)

    job = queue.enqueue_in(timedelta(seconds=30), say_hello)
    now = datetime.now(timezone.utc)
    scheduled_time = registry.get_scheduled_time(job)

    # Job should land roughly 30 seconds in the future (2s slack each way)
    lower_bound = now + timedelta(seconds=28)
    upper_bound = now + timedelta(seconds=32)
    self.assertTrue(lower_bound < scheduled_time < upper_bound)
def enqueue_at(self, datetime, func, *args, **kwargs):
    """Schedules a job to be enqueued at specified time."""
    from .registry import ScheduledJobRegistry

    job = self.create_job(func, *args, **kwargs)
    registry = ScheduledJobRegistry(queue=self)
    # Save the job and register its scheduled time atomically
    with self.connection.pipeline() as pipe:
        job.save(pipeline=pipe)
        registry.schedule(job, datetime, pipeline=pipe)
        pipe.execute()
    return job
def test_prepare_registries(self):
    """prepare_registries() creates self._scheduled_job_registries"""
    foo_queue = Queue('foo', connection=self.testconn)
    bar_queue = Queue('bar', connection=self.testconn)
    scheduler = RQScheduler([foo_queue, bar_queue], connection=self.testconn)

    # Nothing has been prepared yet
    self.assertEqual(scheduler._scheduled_job_registries, [])

    # A single queue name yields a single registry
    scheduler.prepare_registries([foo_queue.name])
    self.assertEqual(scheduler._scheduled_job_registries,
                     [ScheduledJobRegistry(queue=foo_queue)])

    # Both queue names yield registries in the same order
    scheduler.prepare_registries([foo_queue.name, bar_queue.name])
    expected = [ScheduledJobRegistry(queue=foo_queue),
                ScheduledJobRegistry(queue=bar_queue)]
    self.assertEqual(scheduler._scheduled_job_registries, expected)
def test_get_jobs_to_enqueue(self):
    """Getting job ids to enqueue from ScheduledJobRegistry."""
    queue = Queue(connection=self.testconn)
    registry = ScheduledJobRegistry(queue=queue)
    timestamp = current_timestamp()

    # One overdue job ('foo') and two future jobs ('bar', 'baz')
    self.testconn.zadd(registry.key,
                       {'foo': 1, 'bar': timestamp + 10, 'baz': timestamp + 30})

    # With the default cutoff (now), only the overdue job qualifies
    self.assertEqual(registry.get_jobs_to_enqueue(), ['foo'])
    # A later cutoff also picks up 'bar' but still excludes 'baz'
    self.assertEqual(registry.get_jobs_to_enqueue(timestamp + 20), ['foo', 'bar'])
def test_schedule(self):
    """Adding job with the correct score to ScheduledJobRegistry"""
    queue = Queue(connection=self.testconn)
    job = Job.create('myfunc', connection=self.testconn)
    job.save()
    registry = ScheduledJobRegistry(queue=queue)
    if PY2:
        # On Python 2, datetime needs to have timezone
        self.assertRaises(ValueError, registry.schedule, job, datetime(2019, 1, 1))
        registry.schedule(job, datetime(2019, 1, 1, tzinfo=utc))
        self.assertEqual(self.testconn.zscore(registry.key, job.id),
                         1546300800)  # 2019-01-01 UTC in Unix timestamp
    else:
        from datetime import timezone
        # If we pass in a datetime with no timezone, `schedule()`
        # assumes local timezone so depending on your local timezone,
        # the timestamp may be different.
        # NOTE(review): time.timezone is the non-DST offset; when the local
        # zone currently observes DST (time.daylight != 0) the effective
        # offset is time.altzone, so this assertion can fail depending on
        # the machine's zone — consider mocking time.daylight/timezone/altzone.
        registry.schedule(job, datetime(2019, 1, 1))
        self.assertEqual(self.testconn.zscore(registry.key, job.id),
                         1546300800 + time.timezone)  # 2019-01-01 UTC in Unix timestamp

        # Score is always stored in UTC even if datetime is in a different tz
        tz = timezone(timedelta(hours=7))
        job = Job.create('myfunc', connection=self.testconn)
        job.save()
        registry.schedule(job, datetime(2019, 1, 1, 7, tzinfo=tz))
        self.assertEqual(self.testconn.zscore(registry.key, job.id),
                         1546300800)  # 2019-01-01 UTC in Unix timestamp
def schedule_job(self, job, datetime, pipeline=None):
    """Puts job on ScheduledJobRegistry.

    When a pipeline is supplied the commands are queued onto it and the
    caller is responsible for executing it; otherwise a private pipeline
    is created and executed here.
    """
    from .registry import ScheduledJobRegistry
    registry = ScheduledJobRegistry(queue=self)

    owns_pipeline = pipeline is None
    pipe = self.connection.pipeline() if owns_pipeline else pipeline
    # Register the queue key itself so it is discoverable
    pipe.sadd(self.redis_queues_keys, self.key)
    job.save(pipeline=pipe)
    registry.schedule(job, datetime, pipeline=pipe)
    if owns_pipeline:
        pipe.execute()
    return job
def test_get_jobs_to_schedule_with_chunk_size(self):
    """Max amount of jobs returns by get_jobs_to_schedule() equal to chunk_size"""
    queue = Queue(connection=self.testconn)
    registry = ScheduledJobRegistry(queue=queue)
    timestamp = current_timestamp()
    chunk_size = 5

    # Register twice as many due jobs as a single chunk holds
    self.testconn.zadd(
        registry.key,
        {'foo_{}'.format(index): 1 for index in range(chunk_size * 2)},
    )

    self.assertEqual(
        len(registry.get_jobs_to_schedule(timestamp, chunk_size)), chunk_size)
    self.assertEqual(
        len(registry.get_jobs_to_schedule(timestamp, chunk_size * 2)), chunk_size * 2)
def enqueue_at(self, datetime, func, *args, **kwargs):
    """Schedules a job to be enqueued at specified time."""
    from .registry import ScheduledJobRegistry

    if not isinstance(func, string_types) and func.__module__ == '__main__':
        raise ValueError(
            'Functions from the __main__ module cannot be processed '
            'by workers')

    # Detect explicit invocations, i.e. of the form:
    # q.enqueue(foo, args=(1, 2), kwargs={'a': 1}, job_timeout=30)
    timeout = kwargs.pop('job_timeout', None)
    at_front = kwargs.pop('at_front', False)
    job_options = {
        name: kwargs.pop(name, None)
        for name in ('description', 'result_ttl', 'ttl', 'failure_ttl',
                     'depends_on', 'job_id', 'meta')
    }

    if 'args' in kwargs or 'kwargs' in kwargs:
        assert args == (), 'Extra positional arguments cannot be used when using explicit args and kwargs'  # noqa
        args = kwargs.pop('args', None)
        kwargs = kwargs.pop('kwargs', None)

    job = self.create_job(func, status=JobStatus.SCHEDULED, args=args,
                          kwargs=kwargs, timeout=timeout, **job_options)

    registry = ScheduledJobRegistry(queue=self)
    # Persist the job and its scheduled time atomically
    with self.connection.pipeline() as pipe:
        job.save(pipeline=pipe)
        registry.schedule(job, datetime, pipeline=pipe)
        pipe.execute()
    return job
def empty_queue(queue_name, registry_name):
    """Remove all jobs from the named queue or one of its registries.

    For "queued" the queue itself is emptied; for a known registry name each
    job id in that registry is deleted via delete_job_view().  Unknown
    registry names are ignored, matching the original elif chain.
    Returns a status dict.
    """
    if registry_name == "queued":
        Queue(queue_name).empty()
    else:
        # Dispatch table replaces five copy-pasted elif branches
        registry_classes = {
            "failed": FailedJobRegistry,
            "deferred": DeferredJobRegistry,
            "started": StartedJobRegistry,
            "scheduled": ScheduledJobRegistry,
            "finished": FinishedJobRegistry,
        }
        registry_class = registry_classes.get(registry_name)
        if registry_class is not None:
            for job_id in registry_class(queue_name).get_job_ids():
                delete_job_view(job_id)
    return dict(status="OK")
def test_getting_registries_with_serializer(self):
    """Getting job registries from queue object (with custom serializer)"""
    queue = Queue('example', serializer=JSONSerializer)
    registry_pairs = [
        (queue.scheduled_job_registry, ScheduledJobRegistry),
        (queue.started_job_registry, StartedJobRegistry),
        (queue.failed_job_registry, FailedJobRegistry),
        (queue.deferred_job_registry, DeferredJobRegistry),
        (queue.finished_job_registry, FinishedJobRegistry),
        (queue.canceled_job_registry, CanceledJobRegistry),
    ]
    for registry, registry_class in registry_pairs:
        self.assertEqual(registry, registry_class(queue=queue))
        # Make sure we don't use default when queue has custom serializer
        self.assertEqual(registry.serializer, JSONSerializer)
def test_get_jobs(self):
    """get_jobs() works properly"""
    queue = get_queue('django_rq_test')
    registry = ScheduledJobRegistry(queue.name, queue.connection)
    flush_registry(registry)

    now = datetime.datetime.now()
    first = queue.enqueue_at(now, access_self)
    second = queue.enqueue_at(now, access_self)
    requested_ids = [first.id, second.id]

    self.assertEqual(get_jobs(queue, requested_ids), [first, second])
    self.assertEqual(len(registry), 2)

    # A deleted job is filtered out of the result, registry untouched
    queue.connection.delete(first.key)
    self.assertEqual(get_jobs(queue, requested_ids), [second])
    self.assertEqual(len(registry), 2)

    # When `registry` is passed, deleted jobs are also pruned from it
    queue.connection.delete(second.key)
    self.assertEqual(get_jobs(queue, requested_ids, registry), [])
    self.assertEqual(len(registry), 0)
def serialize_queues(instance_number, queues):
    """Serialize queues with per-registry job counts and dashboard URLs.

    Output per queue: name, count, queued_url, then for each registry
    (failed, started, scheduled, deferred, finished) a
    ``<name>_job_registry_count`` and ``<name>_url`` entry — same keys and
    key order as the original hand-expanded version.
    """
    # Insertion order matters: it reproduces the original key order.
    registry_classes = {
        "failed": FailedJobRegistry,
        "started": StartedJobRegistry,
        "scheduled": ScheduledJobRegistry,
        "deferred": DeferredJobRegistry,
        "finished": FinishedJobRegistry,
    }

    def jobs_url(queue_name, registry_name):
        # All registry views share the same pagination defaults
        return url_for(
            ".jobs_overview",
            instance_number=instance_number,
            queue_name=queue_name,
            registry_name=registry_name,
            per_page="8",
            page="1",
        )

    serialized = []
    for q in queues:
        entry = dict(
            name=q.name,
            count=q.count,
            queued_url=jobs_url(q.name, "queued"),
        )
        for registry_name, registry_class in registry_classes.items():
            entry["{}_job_registry_count".format(registry_name)] = registry_class(q.name).count
            entry["{}_url".format(registry_name)] = jobs_url(q.name, registry_name)
        serialized.append(entry)
    return serialized
def clear_all_redis_jobs():
    """Delete every scheduled job (including its data) and empty all queues."""
    registry = ScheduledJobRegistry(queue=current_app.auto_jobs)
    for scheduled_id in registry.get_job_ids():
        registry.remove(scheduled_id, delete_job=True)

    for queue in current_app.redis_queues:
        queue.delete()

    return jsonify(result={
        'status': 'success',
        'msg': 'Cleared all redis jobs',
        'issues': [],
    })
def get_queue_registry_jobs_count(queue_name, registry_name, offset, per_page):
    """Return (total_items, serialized_jobs) for a queue or one of its registries."""
    queue = Queue(queue_name)
    registry_classes = {
        "failed": FailedJobRegistry,
        "deferred": DeferredJobRegistry,
        "started": StartedJobRegistry,
        "finished": FinishedJobRegistry,
        "scheduled": ScheduledJobRegistry,
    }

    if registry_name != "queued":
        if per_page >= 0:
            # Registries take an inclusive end index rather than a length
            per_page = offset + (per_page - 1)
        registry_class = registry_classes.get(registry_name)
        # Unknown registry names fall back to the queue itself
        current_queue = registry_class(queue_name) if registry_class else queue
    else:
        current_queue = queue

    total_items = current_queue.count
    job_ids = current_queue.get_job_ids(offset, per_page)
    current_queue_jobs = [queue.fetch_job(job_id) for job_id in job_ids]
    jobs = [serialize_job(job) for job in current_queue_jobs]
    return (total_items, jobs)
def test_job_delete_removes_itself_from_registries(self):
    """job.delete() should remove itself from job registries"""
    # Data-driven loop replaces five identical copy-pasted stanzas
    cases = [
        (JobStatus.FAILED, FailedJobRegistry),
        (JobStatus.FINISHED, FinishedJobRegistry),
        (JobStatus.STARTED, StartedJobRegistry),
        (JobStatus.DEFERRED, DeferredJobRegistry),
        (JobStatus.SCHEDULED, ScheduledJobRegistry),
    ]
    for status, registry_class in cases:
        job = Job.create(func=fixtures.say_hello, status=status,
                         connection=self.testconn, origin='default')
        job.save()
        registry = registry_class(connection=self.testconn)
        registry.add(job, 500)
        job.delete()
        self.assertFalse(job in registry)
def test_cli_enqueue_schedule_in(self):
    """rq enqueue -u <url> tests.fixtures.say_hello --schedule-in 10s"""
    queue = Queue(connection=self.connection)
    registry = ScheduledJobRegistry(queue=queue)
    worker = Worker(queue)
    scheduler = RQScheduler(queue, self.connection)

    # Clean slate: nothing queued, nothing scheduled
    self.assertTrue(len(queue) == 0)
    self.assertTrue(len(registry) == 0)

    runner = CliRunner()
    result = runner.invoke(main, [
        'enqueue', '-u', self.redis_url,
        'tests.fixtures.say_hello', '--schedule-in', '10s'
    ])
    self.assert_normal_execution(result)

    scheduler.acquire_locks()
    scheduler.enqueue_scheduled_jobs()
    # Not due yet: the job stays in the registry and the worker finds nothing
    self.assertTrue(len(queue) == 0)
    self.assertTrue(len(registry) == 1)
    self.assertFalse(worker.work(True))

    sleep(11)
    # After the delay elapses, the scheduler moves it onto the queue
    scheduler.enqueue_scheduled_jobs()
    self.assertTrue(len(queue) == 1)
    self.assertTrue(len(registry) == 0)
    self.assertTrue(worker.work(True))
def test_getting_registries(self):
    """Getting job registries from queue object"""
    queue = Queue('example')
    expectations = (
        ('scheduled_job_registry', ScheduledJobRegistry),
        ('started_job_registry', StartedJobRegistry),
        ('failed_job_registry', FailedJobRegistry),
        ('deferred_job_registry', DeferredJobRegistry),
        ('finished_job_registry', FinishedJobRegistry),
    )
    for attribute, registry_class in expectations:
        self.assertEqual(getattr(queue, attribute), registry_class(queue=queue))
def get_statistics(run_maintenance_tasks=False):
    """Build a stats dict for every configured queue.

    For each queue in QUEUES_LIST: job count, oldest enqueued-job timestamp,
    connection kwargs, worker count, and the sizes of the finished/started/
    deferred/failed/scheduled registries.  When run_maintenance_tasks is
    True, job and worker registries are cleaned first.
    """
    queues = []
    for index, config in enumerate(QUEUES_LIST):
        queue = get_queue_by_index(index)
        connection = queue.connection
        connection_kwargs = connection.connection_pool.connection_kwargs

        if run_maintenance_tasks:
            clean_registries(queue)
            clean_worker_registry(queue)

        # Raw access to the first item from left of the redis list.
        # This might not be accurate since a new job can be added from the left
        # with the `at_front` parameter.
        # Ideally rq should support Queue.oldest_job
        last_job_id = connection.lindex(queue.key, 0)
        last_job = queue.fetch_job(
            last_job_id.decode('utf-8')) if last_job_id else None
        if last_job:
            oldest_job_timestamp = to_localtime(last_job.enqueued_at)\
                .strftime('%Y-%m-%d, %H:%M:%S')
        else:
            oldest_job_timestamp = "-"

        # parser_class and connection_pool are not needed and not JSON serializable
        connection_kwargs.pop('parser_class', None)
        connection_kwargs.pop('connection_pool', None)

        queue_data = {
            'name': queue.name,
            'jobs': queue.count,
            'oldest_job_timestamp': oldest_job_timestamp,
            'index': index,
            'connection_kwargs': connection_kwargs
        }

        connection = get_connection(queue.name)
        queue_data['workers'] = Worker.count(queue=queue)

        # Registry sizes for the dashboard counters
        finished_job_registry = FinishedJobRegistry(queue.name, connection)
        started_job_registry = StartedJobRegistry(queue.name, connection)
        deferred_job_registry = DeferredJobRegistry(queue.name, connection)
        failed_job_registry = FailedJobRegistry(queue.name, connection)
        scheduled_job_registry = ScheduledJobRegistry(queue.name, connection)
        queue_data['finished_jobs'] = len(finished_job_registry)
        queue_data['started_jobs'] = len(started_job_registry)
        queue_data['deferred_jobs'] = len(deferred_job_registry)
        queue_data['failed_jobs'] = len(failed_job_registry)
        queue_data['scheduled_jobs'] = len(scheduled_job_registry)

        queues.append(queue_data)
    return {'queues': queues}
def test_worker_with_scheduler(self):
    """rq worker -u <url> --with-scheduler"""
    queue = Queue(connection=self.connection)
    queue.enqueue_at(datetime(2019, 1, 1, tzinfo=timezone.utc), say_hello)
    registry = ScheduledJobRegistry(queue=queue)
    runner = CliRunner()

    # Without --with-scheduler the job stays in the scheduled registry
    result = runner.invoke(main, ['worker', '-u', self.redis_url, '-b'])
    self.assert_normal_execution(result)
    self.assertEqual(len(registry), 1)  # 1 job still scheduled

    # With --with-scheduler the due job gets enqueued and the registry drains
    result = runner.invoke(
        main, ['worker', '-u', self.redis_url, '-b', '--with-scheduler'])
    self.assert_normal_execution(result)
    self.assertEqual(len(registry), 0)  # Job has been enqueued
def test_scheduled_jobs_registry_removal(self):
    """Ensure that non existing job is being deleted from registry by view"""
    queue = get_queue('django_rq_test')
    queue_index = get_queue_index('django_rq_test')
    registry = ScheduledJobRegistry(queue.name, queue.connection)

    job = queue.enqueue_at(datetime.now(), access_self)
    self.assertEqual(len(registry), 1)

    # Delete the job's data behind the registry's back...
    queue.connection.delete(job.key)
    # ...then rendering the view should prune the dangling registry entry
    response = self.client.get(reverse('rq_scheduled_jobs', args=[queue_index]))
    self.assertEqual(response.context['jobs'], [])
    self.assertEqual(len(registry), 0)
def test_enqueue_at(self):
    """queue.enqueue_at() puts job in the scheduled"""
    queue = Queue(connection=self.testconn)
    registry = ScheduledJobRegistry(queue=queue)
    scheduler = RQScheduler([queue], connection=self.testconn)
    scheduler.acquire_locks()

    # enqueue_at() lands the job in the ScheduledJobRegistry, not the queue
    queue.enqueue_at(datetime(2019, 1, 1, tzinfo=utc), say_hello)
    self.assertEqual(len(queue), 0)
    self.assertEqual(len(registry), 1)

    # enqueue_scheduled_jobs() drains the registry onto the queue
    scheduler.enqueue_scheduled_jobs()
    self.assertEqual(len(queue), 1)
    self.assertEqual(len(registry), 0)
def serialize_job(job):
    """Serialize a job into a plain dict for the dashboard."""
    if job.is_deferred:
        enqueued_at = "Awaiting completion: " + job._dependency_id
    elif not job.enqueued_at:
        # Only deferred or scheduled jobs have a null enqueued_at,
        # so at this point the job must be scheduled
        scheduled_for = ScheduledJobRegistry(job.origin).get_scheduled_time(job)
        enqueued_at = serialize_date(scheduled_for)
    else:
        enqueued_at = serialize_date(job.enqueued_at)
    return dict(
        id=job.id,
        created_at=serialize_date(job.created_at),
        enqueued_at=enqueued_at,
        ended_at=serialize_date(job.ended_at),
        exc_info=str(job.exc_info) if job.exc_info else None,
        description=job.description,
    )
def test_get_scheduled_time(self):
    """get_scheduled_time() returns job's scheduled datetime"""
    queue = Queue(connection=self.testconn)
    registry = ScheduledJobRegistry(queue=queue)

    job = Job.create('myfunc', connection=self.testconn)
    job.save()
    dt = datetime(2019, 1, 1, tzinfo=timezone.utc)
    registry.schedule(job, dt)
    self.assertEqual(registry.get_scheduled_time(job), dt)
    # get_scheduled_time() should also accept a bare job ID
    self.assertEqual(registry.get_scheduled_time(job.id), dt)

    # Unknown job IDs raise NoSuchJobError
    self.assertRaises(NoSuchJobError, registry.get_scheduled_time, '123')
def job_info(instance_number, job_id):
    """Return a serializable description of a single job."""
    job = Job.fetch(job_id)
    if job.is_deferred:
        enqueued_at = job._dependency_id
    elif not job.enqueued_at:
        # Only deferred or scheduled jobs have a null enqueued_at,
        # so this one must be scheduled
        scheduled_for = ScheduledJobRegistry(job.origin).get_scheduled_time(job)
        enqueued_at = serialize_date(scheduled_for)
    else:
        enqueued_at = serialize_date(job.enqueued_at)
    return dict(
        id=job.id,
        created_at=serialize_date(job.created_at),
        enqueued_at=enqueued_at,
        ended_at=serialize_date(job.ended_at),
        origin=job.origin,
        status=job.get_status(),
        result=job._result,
        exc_info=str(job.exc_info) if job.exc_info else None,
        description=job.description,
    )
def test_enqueue_scheduled_jobs(self):
    """Scheduler can enqueue scheduled jobs"""
    queue = Queue(connection=self.testconn)
    registry = ScheduledJobRegistry(queue=queue)
    job = Job.create('myfunc', connection=self.testconn)
    job.save()
    registry.schedule(job, datetime(2019, 1, 1, tzinfo=timezone.utc))

    scheduler = RQScheduler([queue], connection=self.testconn)
    scheduler.acquire_locks()
    scheduler.enqueue_scheduled_jobs()
    self.assertEqual(len(queue), 1)
    # Once enqueued, the job must leave the registry
    self.assertEqual(len(registry), 0)

    # Jobs scheduled in the far future should not be affected
    registry.schedule(job, datetime(2100, 1, 1, tzinfo=timezone.utc))
    scheduler.enqueue_scheduled_jobs()
    self.assertEqual(len(queue), 1)
def schedual_jobs(repeat_in=30, initial_run=False):
    """(Re)register the recurring maintenance jobs on the auto_jobs queue.

    If the sentinel 'schedule_job' id is missing from the scheduled registry,
    the registry is cleared and all repeat jobs are scheduled again —
    including this function itself (under the sentinel id) so the cycle
    continues.  On the initial run, two status checks are also scheduled
    to fire almost immediately.

    NOTE: the misspelled name is kept — it is public and self-referenced
    when the function re-enqueues itself.
    """
    registry = ScheduledJobRegistry(queue=current_app.auto_jobs)
    enqueue_in = current_app.auto_jobs.enqueue_in

    # get_job_ids() already returns a list; the redundant list() wrapper
    # from the original is dropped
    if 'schedule_job' not in registry.get_job_ids():
        # Sentinel missing: wipe stale entries and rebuild the schedule
        for job_id in registry.get_job_ids():
            registry.remove(job_id)
        print('Setting repeat jobs..')
        enqueue_in(timedelta(minutes=repeat_in), task_check_uniref_has_blast_source)
        enqueue_in(timedelta(minutes=repeat_in + 4), check_random_uniref)
        enqueue_in(timedelta(minutes=repeat_in + 8), task_check_blast_status)
        enqueue_in(timedelta(minutes=repeat_in + 12), task_check_ssn_status)
        enqueue_in(timedelta(minutes=repeat_in + 16), schedual_jobs, job_id='schedule_job')

    # Idiom fix: `== True` replaced with a plain truthiness check
    if initial_run:
        enqueue_in(timedelta(minutes=1), task_check_blast_status)
        enqueue_in(timedelta(minutes=2), task_check_ssn_status)