def test_enqueue_in(self):
    """
    Ensure that jobs have the right scheduled time.
    """
    right_now = datetime.utcnow()
    time_delta = timedelta(minutes=1)
    job = self.scheduler.enqueue_in(time_delta, say_hello)
    self.assertIn(job.id,
                  tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
    self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                     to_unix(right_now + time_delta))
    time_delta = timedelta(hours=1)
    job = self.scheduler.enqueue_in(time_delta, say_hello)
    self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                     to_unix(right_now + time_delta))
def test_crontab_schedules_correctly(self):
    # Create a job with a cronjob_string
    now = datetime.now().replace(minute=0, hour=0, second=0, microsecond=0)
    with freezegun.freeze_time(now):
        job = self.scheduler.cron("5 * * * * *", say_hello)

    with mock.patch.object(self.scheduler, 'enqueue_job',
                           wraps=self.scheduler.enqueue_job) as enqueue_job, \
            freezegun.freeze_time(now + timedelta(minutes=5)):
        self.assertEqual(1, self.scheduler.count())
        self.scheduler.enqueue_jobs()
        self.assertEqual(1, enqueue_job.call_count)

        (job, next_scheduled_time), = self.scheduler.get_jobs(with_times=True)
        expected_scheduled_time = (now + timedelta(hours=1, minutes=5)).astimezone(UTC)
        self.assertEqual(to_unix(expected_scheduled_time),
                         to_unix(next_scheduled_time))
def enqueue_job(self, job):
    """
    Move a scheduled job to a queue. In addition, it also puts the job
    back into the scheduler if needed.
    """
    self.log.debug('Pushing {0} to {1}'.format(job.id, job.origin))

    interval = job.meta.get('interval', None)
    repeat = job.meta.get('repeat', None)

    # If job is a repeated job, decrement counter
    if repeat:
        job.meta['repeat'] = int(repeat) - 1
    job.enqueued_at = datetime.utcnow()
    job.save()

    queue = self.get_queue_for_job(job)
    queue.push_job_id(job.id)
    self.connection.zrem(self.scheduled_jobs_key, job.id)

    if interval:
        # If this is a repeat job and counter has reached 0, don't repeat
        if repeat is not None:
            if job.meta['repeat'] == 0:
                return
        self.connection._zadd(self.scheduled_jobs_key,
                              to_unix(datetime.utcnow()) + int(interval),
                              job.id)
def test_job_with_crontab_get_rescheduled(self):
    # Create a job with a cronjob_string
    job = self.scheduler.cron("1 * * * *", say_hello)

    # current unix_time
    old_next_scheduled_time = self.testconn.zscore(
        self.scheduler.scheduled_jobs_key, job.id)

    # change crontab
    job.meta['cron_string'] = "2 * * * *"

    # enqueue the job
    self.scheduler.enqueue_job(job)
    self.assertIn(
        job.id,
        tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

    # check that next scheduled time has changed
    self.assertNotEqual(
        old_next_scheduled_time,
        self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))

    # check that new next scheduled time is set correctly
    expected_next_scheduled_time = to_unix(
        get_next_scheduled_time("2 * * * *"))
    self.assertEqual(
        self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
        expected_next_scheduled_time)
def get_jobs_to_queue(self, with_times=False):
    """
    Returns a list of job instances that should be queued
    (score lower than current timestamp).
    If with_times is True a list of tuples consisting of the job instance and
    its scheduled execution time is returned.
    """
    return self.get_jobs(to_unix(datetime.utcnow()), with_times=with_times)
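# A minimal sketch (not part of the scheduler source above) of how a periodic
# "enqueue due jobs" pass could combine get_jobs_to_queue() with enqueue_job();
# the tests above exercise such a pass via scheduler.enqueue_jobs(). The real
# run loop would add locking, signal handling and sleeping between passes,
# which are omitted here.
def enqueue_due_jobs(scheduler):
    # Every job whose sorted-set score (scheduled time) is <= now is moved
    # onto its queue; interval/repeat bookkeeping happens inside enqueue_job().
    jobs = scheduler.get_jobs_to_queue()
    for job in jobs:
        scheduler.enqueue_job(job)
    return jobs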
def enqueue_in(self, time_delta, func, *args, **kwargs):
    """
    Similar to ``enqueue_at``, but accepts a timedelta instead of a datetime
    object. The job's scheduled execution time will be calculated by adding
    the timedelta to datetime.utcnow().
    """
    job = self._create_job(func, args=args, kwargs=kwargs)
    self.connection._zadd(self.scheduled_jobs_key,
                          to_unix(datetime.utcnow() + time_delta),
                          job.id)
    return job
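# Hedged usage sketch for enqueue_in(). The Redis instance, queue name and
# `send_report` function are placeholders; the import path follows the
# enqueue_at() docstring below (newer releases ship the class as
# ``from rq_scheduler import Scheduler``).
from datetime import timedelta
from redis import Redis
from rq.scheduler import Scheduler

def send_report():
    pass

scheduler = Scheduler(queue_name='default', connection=Redis())
# Run send_report roughly ten minutes from now; the scheduled time is stored
# as a unix-timestamp score in the scheduled_jobs_key sorted set.
job = scheduler.enqueue_in(timedelta(minutes=10), send_report)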
def test_change_execution_time(self):
    """
    Ensure that ``change_execution_time`` updates the job's score and raises
    ValueError once the job is no longer scheduled.
    """
    job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
    new_date = datetime(2010, 1, 1)
    self.scheduler.change_execution_time(job, new_date)
    self.assertEqual(to_unix(new_date),
                     self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))
    self.scheduler.cancel(job)
    self.assertRaises(ValueError, self.scheduler.change_execution_time, job, new_date)
def test_job_with_intervals_get_rescheduled(self):
    """
    Ensure jobs with interval attribute are put back in the scheduler
    """
    time_now = datetime.utcnow()
    interval = 10
    job = self.scheduler.schedule(time_now, say_hello, interval=interval)
    self.scheduler.enqueue_job(job)
    self.assertIn(job.id,
                  tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
    self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                     to_unix(time_now) + interval)

    # Now the same thing using enqueue_periodic
    job = self.scheduler.enqueue_periodic(time_now, interval, None, say_hello)
    self.scheduler.enqueue_job(job)
    self.assertIn(job.id,
                  tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
    self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                     to_unix(time_now) + interval)
def test_create_scheduled_job(self):
    """
    Ensure that scheduled jobs are put in the scheduler queue with the right score
    """
    scheduled_time = datetime.utcnow()
    job = self.scheduler.enqueue_at(scheduled_time, say_hello)
    self.assertEqual(job, Job.fetch(job.id, connection=self.testconn))
    self.assertIn(job.id,
                  tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
    self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                     to_unix(scheduled_time))
def test_job_rescheduled_correctly(self):
    # log the admin user in
    self._logSuperuserIn()

    job = self.scheduler.enqueue_in(
        timedelta(minutes=60),
        dummy_job,
    )
    rqjob = RQJob.objects.create(job_id=job.id, trigger=self.trigger)
    Job.fetch(job.id, connection=self.scheduler.connection)  # no error

    url = reverse('admin:autoemails_rqjob_sendnow', args=[rqjob.pk])
    rv = self.client.post(url, follow=True)

    self.assertIn(
        f'The job {job.id} was rescheduled to now.',
        rv.content.decode('utf-8'),
    )

    for _job, time in self.scheduler.get_jobs(with_times=True):
        if _job.id == job.id:
            now = to_unix(datetime.utcnow())
            epochtime = to_unix(time)
            self.assertAlmostEqual(epochtime, now, delta=60)  # +- 60s
def get_jobs(self, until=None, with_times=False):
    """
    Returns a list of job instances that will be queued until the given
    time. If no 'until' argument is given all jobs are returned. This
    function accepts datetime and timedelta instances as well as integers
    representing epoch values.
    If with_times is True a list of tuples consisting of the job instance
    and its scheduled execution time is returned.
    """
    def epoch_to_datetime(epoch):
        return from_unix(float(epoch))

    if until is None:
        until = "+inf"
    elif isinstance(until, datetime):
        until = to_unix(until)
    elif isinstance(until, timedelta):
        until = to_unix(datetime.utcnow() + until)
    job_ids = self.connection.zrangebyscore(self.scheduled_jobs_key, 0, until,
                                            withscores=with_times,
                                            score_cast_func=epoch_to_datetime)
    if not with_times:
        job_ids = zip(job_ids, repeat(None))
    jobs = []
    for job_id, sched_time in job_ids:
        job_id = job_id.decode('utf-8')
        try:
            job = Job.fetch(job_id, connection=self.connection)
            if with_times:
                jobs.append((job, sched_time))
            else:
                jobs.append(job)
        except NoSuchJobError:
            # Remove jobs whose data no longer exists from the scheduler
            self.cancel(job_id)
    return jobs
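# Hedged usage sketch for get_jobs(). `scheduler` is assumed to be set up as
# in the enqueue_at() docstring below; the variable names are illustrative.
from datetime import datetime, timedelta

all_scheduled = scheduler.get_jobs()                            # no cutoff
due_within_hour = scheduler.get_jobs(until=timedelta(hours=1))  # relative cutoff
due_by_date = scheduler.get_jobs(until=datetime(2021, 1, 1))    # absolute cutoff
# with_times=True yields (job, scheduled_datetime) tuples instead of jobs
for job, scheduled_at in scheduler.get_jobs(until=timedelta(hours=1),
                                            with_times=True):
    print(job.id, scheduled_at)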
def change_execution_time(self, job, date_time):
    """
    Change a job's execution time. The update is wrapped in a transaction
    to prevent a race condition.
    """
    with self.connection._pipeline() as pipe:
        while 1:
            try:
                pipe.watch(self.scheduled_jobs_key)
                if pipe.zscore(self.scheduled_jobs_key, job.id) is None:
                    raise ValueError('Job not in scheduled jobs queue')
                pipe.zadd(self.scheduled_jobs_key, to_unix(date_time), job.id)
                break
            except WatchError:
                # If the job is still in the queue, retry; otherwise it has
                # already been executed, so we raise an error
                if pipe.zscore(self.scheduled_jobs_key, job.id) is None:
                    raise ValueError('Job not in scheduled jobs queue')
                continue
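# Hedged sketch: moving an already-scheduled job to a new execution time.
# `scheduler` and `send_report` are placeholders set up as in the earlier
# sketches; a ValueError is raised if the job is no longer in the scheduled
# set (for example, it has already been enqueued or was cancelled).
from datetime import datetime, timedelta

job = scheduler.enqueue_in(timedelta(days=1), send_report)
# Pull the run forward to two hours from now.
scheduler.change_execution_time(job, datetime.utcnow() + timedelta(hours=2))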
def schedule(self, scheduled_time, func, args=None, kwargs=None,
             interval=None, repeat=None, result_ttl=None, ttl=None,
             timeout=None, id=None, description=None, queue_name=None):
    """
    Schedule a job to be periodically executed, at a certain interval.
    """
    # Set result_ttl to -1 for periodic jobs, if result_ttl not specified
    if interval is not None and result_ttl is None:
        result_ttl = -1
    job = self._create_job(func, args=args, kwargs=kwargs, commit=False,
                           result_ttl=result_ttl, ttl=ttl, id=id or uuid(),
                           description=description, queue_name=queue_name,
                           timeout=timeout)

    scheduled_time = scheduled_time.replace(tzinfo=tzlocal())

    if interval is not None:
        job.meta['interval'] = int(interval)
    if repeat is not None:
        job.meta['repeat'] = int(repeat)
    if repeat and interval is None:
        raise ValueError("Can't repeat a job without interval argument")
    job.save()
    self.connection._zadd(self.scheduled_jobs_key,
                          to_unix(scheduled_time),
                          job.id)
    return job
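# Hedged usage sketch for schedule(): a periodic job that runs every 60
# seconds, at most 10 times. Because `interval` is given and result_ttl is
# not, result_ttl defaults to -1 so the job hash is kept between runs.
# `scheduler` and `send_report` are placeholders from the earlier sketches.
from datetime import datetime

scheduler.schedule(
    scheduled_time=datetime.utcnow(),  # first run: as soon as possible
    func=send_report,
    interval=60,   # seconds between runs, stored in job.meta['interval']
    repeat=10,     # stop after 10 runs; omit for unlimited repeats
)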
def enqueue_at(self, scheduled_time, func, *args, **kwargs):
    """
    Pushes a job to the scheduler queue. The scheduled queue is a Redis sorted
    set ordered by timestamp - which in this case is the job's scheduled
    execution time.

    Usage:

    from datetime import datetime
    from redis import Redis
    from rq.scheduler import Scheduler
    from foo import func

    redis = Redis()
    scheduler = Scheduler(queue_name='default', connection=redis)
    scheduler.enqueue_at(datetime(2020, 1, 1), func, 'argument', keyword='argument')
    """
    job = self._create_job(func, args=args, kwargs=kwargs)
    self.connection._zadd(self.scheduled_jobs_key,
                          to_unix(scheduled_time),
                          job.id)
    return job
def schedule(self, scheduled_time, func, args=None, kwargs=None,
             interval=None, repeat=None, result_ttl=None, timeout=None):
    """
    Schedule a job to be periodically executed, at a certain interval.
    """
    # Set result_ttl to -1 for periodic jobs, if result_ttl not specified
    if interval is not None and result_ttl is None:
        result_ttl = -1
    job = self._create_job(func, args=args, kwargs=kwargs, commit=False,
                           result_ttl=result_ttl)
    if interval is not None:
        job.meta['interval'] = int(interval)
    if repeat is not None:
        job.meta['repeat'] = int(repeat)
    if repeat and interval is None:
        raise ValueError("Can't repeat a job without interval argument")
    if timeout is not None:
        job.timeout = timeout
    job.save()
    self.connection._zadd(self.scheduled_jobs_key,
                          to_unix(scheduled_time),
                          job.id)
    return job
def cron(self, cron_string, func, args=None, kwargs=None, repeat=None,
         queue_name=None, id=None, timeout=None, description=None):
    """
    Schedule a cronjob
    """
    scheduled_time = self.get_next_scheduled_time(cron_string)

    # Set result_ttl to -1, as jobs scheduled via cron are periodic ones.
    # Otherwise the job would expire after 500 sec.
    job = self._create_job(func, args=args, kwargs=kwargs, commit=False,
                           result_ttl=-1, id=id or uuid(),
                           queue_name=queue_name, description=description,
                           timeout=timeout)

    job.meta['cron_string'] = cron_string

    if repeat is not None:
        job.meta['repeat'] = int(repeat)

    job.save()

    self.connection._zadd(self.scheduled_jobs_key,
                          to_unix(scheduled_time),
                          job.id)
    return job
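# Hedged usage sketch for cron(): run send_report at minute 15 of every hour,
# indefinitely (repeat is omitted). The next run time is derived from the
# crontab expression and stored as the job's score; `scheduler` and
# `send_report` are placeholders from the earlier sketches.
job = scheduler.cron(
    "15 * * * *",
    func=send_report,
    queue_name='default',
)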
def testActionAdding(self):
    trigger = self.trigger
    task = self.task

    # Define a special class inheriting from the mixin we're about to test
    # so that we can (indirectly?) test the mixin itself. In some cases
    # the mock mechanism will have to be used, because - again - we can
    # only indirectly test the behavior of `action_add()`.
    class MockView(ActionManageMixin):
        def __init__(self, connection, queue, scheduler, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.object = task
            self.logger = MagicMock()
            self.connection = connection
            self.queue = queue
            self.scheduler = scheduler

        def get_logger(self):
            return self.logger

        def get_scheduler(self):
            self.get_redis_connection()
            return self.scheduler

        def get_redis_connection(self):
            return self.connection

        def get_triggers(self):
            objs = [trigger]
            triggers = MagicMock()
            triggers.__iter__.return_value = iter(objs)
            triggers.count.return_value = len(objs)
            return triggers

        def objects(self):
            return dict(task=self.object, event=self.object.event)

        @property
        def request(self):
            # fake request created thanks to RequestFactory from Django Test Client
            req = RequestFactory().post('/tasks/create')
            return req

    # almost identical action object to the one that is created in the view
    action = NewInstructorAction(
        trigger=trigger,
        objects=dict(task=task, event=task.event),
    )

    view = MockView(self.connection, self.queue, self.scheduler)

    # assertions before the view action is invoked
    self.assertEqual(self.scheduler.count(), 0)
    self.assertEqual(RQJob.objects.count(), 0)

    # view action invoke
    view.action_add(NewInstructorAction)

    # ensure only one job is added (because we created only one trigger
    # for it)
    self.assertEqual(self.scheduler.count(), 1)
    jobs = list(self.scheduler.get_jobs())
    self.assertEqual(len(jobs), 1)

    # logger.debug is called 6 times
    self.assertEqual(view.get_logger().debug.call_count, 6)

    # test job
    job = jobs[0]
    # proper action is scheduled
    # accessing `instance` directly causes unpickling of stored data
    self.assertEqual(job.instance, action)

    # job appeared in the queue
    enqueued_job, enqueued_timestamp = list(
        self.scheduler.get_jobs(
            until=to_unix(datetime.utcnow() + action.launch_at),
            with_times=True,
        )
    )[0]
    self.assertEqual(job, enqueued_job)

    # job appeared in the queue with correct timestamp (we accept +- 1min)
    run_time = datetime.utcnow() + action.launch_at
    one_min = timedelta(minutes=1)
    self.assertTrue(
        (run_time + one_min) > enqueued_timestamp > (run_time - one_min))

    # meta as expected
    self.assertEqual(
        job.meta,
        dict(
            action=action,
            template=trigger.template,
            launch_at=action.get_launch_at(),
            email=None,
            context=None,
        ),
    )

    # test self.rq_jobs
    self.assertEqual(RQJob.objects.count(), 1)
    rqjob = RQJob.objects.first()
    self.assertEqual(rqjob.job_id, job.get_id())
    self.assertEqual(rqjob.trigger, trigger)