def process(id_, data_dir, clean_in=10):
    """Job to remove the EXIF data from an uploaded image.

    The EXIF data is saved as a json file.  If the image had an EXIF
    thumbnail, it is saved as a separate file.  A ``cleanup`` job is
    scheduled ``clean_in`` minutes from now on the same queue this job
    arrived on.
    """
    image_path = os.path.join(data_dir, "{}.jpg".format(id_))
    image = ExifImage(image_path)
    image.thumb()
    image.dump()
    image.clean()

    # Schedule the cleanup task on the queue the current job came from.
    current = get_current_job()
    timestamp = datetime.datetime.now()
    delay = datetime.timedelta(minutes=clean_in)
    scheduler = Scheduler(queue_name=current.origin,
                          connection=get_current_connection())
    scheduler.enqueue_in(delay, cleanup, id_, data_dir)

    removal_time = timestamp + delay
    print("Added at: {}".format(timestamp.isoformat()))
    print("Removed by: {}".format(removal_time.isoformat()))
    return {
        'thumb': image.thumb_name,
        'json': image.json_name,
        'removed_around': removal_time.isoformat(),
    }
def renderings(org_label, project_label, rendering_id):
    '''Delete or download a single rendering of a report.

    /organizations/aquaya/projects/water-quality/renderings/4cmb1?delete=true
        : remove a rendering from the system and s3
    /organizations/aquaya/projects/water-quality/renderings/4cmb1?download=true
        : download a copy of the rendering from s3
    '''
    # NOTE(review): assumes session['email'] always matches exactly one User —
    # confirm against the auth layer.
    user = User.objects(email=session['email'])[0]
    orgs = Organization.objects(label=org_label)
    if not orgs:
        abort(404)
    org = orgs[0]

    # permission-check: 404 (not 403) so outsiders can't probe for existence
    if org not in user.organizations and not user.admin_rights:
        app.logger.error('%s tried to view a project but was \
            denied for want of admin rights' % session['email'])
        abort(404)

    # find the project
    projects = Project.objects(label=project_label, organization=org)
    if not projects:
        abort(404)

    # find the specified rendering
    renderings = Rendering.objects(id=rendering_id)
    if not renderings:
        abort(404)
    rendering = renderings[0]

    # save the report for later redirect (needed after the rendering is gone)
    report = rendering.report

    if request.args.get('delete', '') == 'true':
        # remove the rendering
        utilities.delete_rendering(rendering, user.email)
        flash('Rendering successfully deleted.', 'success')
        return redirect(url_for('reports'
            , org_label=report.project.organization.label
            , project_label=report.project.label
            , report_label=report.label))

    if request.args.get('download', '') == 'true':
        absolute_filename = utilities.download_rendering_from_s3(rendering)

        # delay the deletion so we have time to serve the file
        redis_config = app.config['REDIS_CONFIG']
        use_connection(Redis(redis_config['host'], redis_config['port']
            , password=redis_config['password']))
        scheduler = Scheduler()
        scheduler.enqueue_in(datetime.timedelta(seconds=60)
            , delete_local_file, absolute_filename)

        return send_file(absolute_filename, as_attachment=True)

    else:
        # neither ?delete=true nor ?download=true was supplied
        abort(404)
def test_enqueue_in(self):
    """Jobs enqueued via ``enqueue_in`` carry the right scheduled time."""
    start = datetime.now()
    scheduler = Scheduler(connection=self.testconn)

    def expected_score(delta):
        # rq-scheduler stores the run time as a unix-timestamp zset score
        return int((start + delta).strftime('%s'))

    one_minute = timedelta(minutes=1)
    job = scheduler.enqueue_in(one_minute, say_hello)
    self.assertIn(job.id,
                  self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 1))
    self.assertEqual(
        self.testconn.zscore(scheduler.scheduled_jobs_key, job.id),
        expected_score(one_minute))

    one_hour = timedelta(hours=1)
    job = scheduler.enqueue_in(one_hour, say_hello)
    self.assertEqual(
        self.testconn.zscore(scheduler.scheduled_jobs_key, job.id),
        expected_score(one_hour))
def update_scheduled_connection(connection):
    '''Schedule a new scrape of the connection's source.

    Called when the interval was changed or when the last job finished and
    the next one needs to be scheduled.
    '''
    task = connection.schedule

    # A schedule without an interval cannot be queued; bail out early.
    # Note that ready_to_connect does not verify this.
    if not task.interval:
        return False

    # connect to the rq scheduler
    redis_config = app.config['REDIS_CONFIG']
    use_connection(Redis(redis_config['host'], redis_config['port']
        , password=redis_config['password']))
    scheduler = Scheduler()

    # drop any job this schedule already had sitting in the queue
    if task.next_task_id:
        scheduler.cancel(Job(id=task.next_task_id))

    # queue the next run after the configured interval
    delay = _calculate_schedule_delay(task.interval)
    job = scheduler.enqueue_in(datetime.timedelta(seconds=int(delay))
        , connect_to_source, connection.id)

    # persist the new job id and its expected run time
    task.update(set__next_task_id = job.id)
    task.update(set__next_run_time = (datetime.datetime.utcnow()
        + datetime.timedelta(seconds=delay)))
def test_enqueue_missing1(client):
    """Test self-healing enqueueing missing monitor jobs"""
    with client.application.app_context():
        app = client.application
        app.redis.flushall()

        # Create one execution in each status; only the 'pulling' one gets a
        # monitor job pre-scheduled here.
        for status in [
            JobExecution.Status.enqueued,
            JobExecution.Status.pulling,
            JobExecution.Status.running,
            JobExecution.Status.done,
            JobExecution.Status.failed,
        ]:
            _, job, execution = JobExecutionFixture.new_defaults()
            execution.status = status
            job.save()

            if status == JobExecution.Status.pulling:
                scheduler = Scheduler("monitor", connection=app.redis)
                interval = timedelta(seconds=1)
                scheduler.enqueue_in(
                    interval,
                    job_mod.monitor_job,
                    job.task.task_id,
                    job.job_id,
                    execution.execution_id,
                )

        # Self-heal: schedule monitor jobs for executions missing one.
        job_mod.enqueue_missing_monitor_jobs(app)

        # rq-scheduler keeps pending jobs in this sorted set.
        hash_key = "rq:scheduler:scheduled_jobs"
        res = app.redis.exists(hash_key)
        expect(res).to_be_true()

        # NOTE(review): expects exactly 2 scheduled jobs — the one
        # pre-scheduled above plus one added by the healer; confirm which
        # statuses enqueue_missing_monitor_jobs considers "missing".
        res = app.redis.zrange(hash_key, 0, -1)
        expect(res).to_length(2)
def add():
    """Schedule an HTTP request to ``url`` after ``minutes_delta`` minutes.

    Reads ``url`` and ``minutes_delta`` from the query string, enqueues a
    delayed ``request_url`` job via rq-scheduler, appends the action to the
    log file, and returns the log message to the caller.
    """
    url = request.query['url']
    minutes_delta = int(request.query['minutes_delta'])

    # Get a scheduler for the "default" queue
    scheduler = Scheduler(connection=Redis())

    # Schedule the request to fire minutes_delta minutes from now
    job = scheduler.enqueue_in(timedelta(minutes=minutes_delta), request_url,
                               **{"url": url})

    msg = u'[{}][scheduler/add] {} scheduled after {} minutes. job id {}\n'.format(
        datetime.now().isoformat()[:19], url, minutes_delta, job.id)
    logging.info(msg)

    # BUG FIX: chmod takes an octal mode; the old literal 777 (decimal)
    # actually set mode 0o1411, not rwxrwxrwx.
    os.chmod(filename_log, 0o777)
    with codecs.open(filename_log, "a", encoding="utf-8") as f:
        f.write(msg)
    return msg
def update_scheduled_send(schedule_id):
    '''Schedule a new sending.

    Called when the interval was changed or the last job has finished and
    the next one needs to be scheduled.
    '''
    matches = Schedule.objects(id=schedule_id)
    if not matches:
        return False
    schedule = matches[0]

    # confirm that the schedule is valid before queueing anything
    if not schedule.interval:
        return False

    # connect to the rq scheduler
    redis_config = app.config['REDIS_CONFIG']
    use_connection(Redis(redis_config['host'], redis_config['port']
        , password=redis_config['password']))
    scheduler = Scheduler()

    # cancel any job this schedule already had enqueued
    # (rescheduling in place was tried but did not work)
    if schedule.next_task_id:
        scheduler.cancel(Job(id=schedule.next_task_id))

    # queue the next send after the configured interval
    delay = _calculate_schedule_delay(schedule.interval)
    job = scheduler.enqueue_in(datetime.timedelta(seconds=int(delay))
        , send_scheduled_report, schedule.id)

    # persist this job's id and its expected run time
    schedule.update(set__next_task_id = job.id)
    schedule.update(set__next_run_time = (datetime.datetime.utcnow()
        + datetime.timedelta(seconds=delay)))
class TestScheduler(RQTestCase):
    """Tests for rq-scheduler's Scheduler against a real test connection."""

    def setUp(self):
        super(TestScheduler, self).setUp()
        self.scheduler = Scheduler(connection=self.testconn)

    def test_birth_and_death_registration(self):
        """Scheduler registers its birth/death in a Redis hash exactly once."""
        key = Scheduler.scheduler_key
        self.assertNotIn(key, self.testconn.keys('*'))
        scheduler = Scheduler(connection=self.testconn)
        scheduler.register_birth()
        self.assertIn(key, self.testconn.keys('*'))
        self.assertFalse(self.testconn.hexists(key, 'death'))
        # a second birth registration must be rejected
        self.assertRaises(ValueError, scheduler.register_birth)
        scheduler.register_death()
        self.assertTrue(self.testconn.hexists(key, 'death'))

    def test_create_job(self):
        """
        Ensure that jobs are created properly.
        """
        job = self.scheduler._create_job(say_hello, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job, job_from_queue)
        self.assertEqual(job_from_queue.func, say_hello)

    def test_job_not_persisted_if_commit_false(self):
        """
        Ensure jobs are only saved to Redis if commit=True.
        """
        job = self.scheduler._create_job(say_hello, commit=False)
        self.assertEqual(self.testconn.hgetall(job.key), {})

    def test_create_scheduled_job(self):
        """
        Ensure that scheduled jobs are put in the scheduler queue with the right score
        """
        scheduled_time = datetime.now()
        job = self.scheduler.enqueue_at(scheduled_time, say_hello)
        self.assertEqual(job, Job.fetch(job.id, connection=self.testconn))
        self.assertIn(job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        # the zset score is the unix timestamp of the scheduled time
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            int(scheduled_time.strftime('%s')))

    def test_enqueue_in(self):
        """
        Ensure that jobs have the right scheduled time.
        """
        right_now = datetime.now()
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertIn(job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            int((right_now + time_delta).strftime('%s')))
        time_delta = timedelta(hours=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            int((right_now + time_delta).strftime('%s')))

    def test_get_jobs_to_queue(self):
        """
        Ensure that jobs scheduled the future are not queued.
        """
        now = datetime.now()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertNotIn(job, self.scheduler.get_jobs_to_queue())

    def test_enqueue_job(self):
        """
        When scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        """
        now = datetime.now()
        queue_name = 'foo'
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)
        job = scheduler.enqueue_at(now, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job,
            self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10))
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)

    def test_cancel_scheduled_job(self):
        """
        When scheduled job is canceled, make sure:
        - Job is removed from the sorted set of scheduled jobs
        """
        # schedule a job to be enqueued one minute from now
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        # cancel the scheduled job and check that it's gone from the set
        self.scheduler.cancel(job)
        self.assertNotIn(job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))

    def test_change_execution_time(self):
        """
        Ensure ``change_execution_time`` is called, ensure that job's score is updated
        """
        job = self.scheduler.enqueue_at(datetime.now(), say_hello)
        new_date = datetime(2010, 1, 1)
        self.scheduler.change_execution_time(job, new_date)
        self.assertEqual(int(new_date.strftime('%s')),
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))

    def test_args_kwargs_are_passed_correctly(self):
        """
        Ensure that arguments and keyword arguments are properly saved to jobs.
        """
        job = self.scheduler.enqueue_at(datetime.now(), simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_at(datetime.now(), simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_at(datetime.now(), simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1, ))
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_in(time_delta, simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1, ))

    def test_interval_and_repeat_persisted_correctly(self):
        """
        Ensure that interval and repeat attributes get correctly saved in Redis.
        """
        job = self.scheduler.enqueue(datetime.now(), say_hello, interval=10, repeat=11)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(int(job_from_queue.interval), 10)
        self.assertEqual(int(job_from_queue.repeat), 11)

    def test_repeat_without_interval_raises_error(self):
        # Ensure that an error is raised if repeat is specified without interval
        def create_job():
            self.scheduler.enqueue(datetime.now(), say_hello, repeat=11)
        self.assertRaises(ValueError, create_job)

    def test_job_with_intervals_get_rescheduled(self):
        """
        Ensure jobs with interval attribute are put back in the scheduler
        """
        time_now = datetime.now()
        interval = 10
        job = self.scheduler.enqueue(time_now, say_hello, interval=interval)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        # rescheduled one interval after the original scheduled time
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            int(time_now.strftime('%s')) + interval)

        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, None, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            int(time_now.strftime('%s')) + interval)

    def test_job_with_repeat(self):
        """
        Ensure jobs with repeat attribute are put back in the scheduler
        X (repeat) number of times
        """
        time_now = datetime.now()
        interval = 10
        # If job is repeated once, the job shouldn't be put back in the queue
        job = self.scheduler.enqueue(time_now, say_hello, interval=interval, repeat=1)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.enqueue(time_now, say_hello, interval=interval, repeat=2)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))

        time_now = datetime.now()
        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, 1, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.enqueue_periodic(time_now, interval, 2, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))

    def test_missing_jobs_removed_from_scheduler(self):
        """
        Ensure jobs that don't exist when queued are removed from the scheduler.
        """
        job = self.scheduler.enqueue(datetime.now(), say_hello)
        job.cancel()
        self.scheduler.get_jobs_to_queue()
        self.assertNotIn(job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))

    def test_periodic_jobs_sets_ttl(self):
        """
        Ensure periodic jobs set result_ttl to infinite.
        """
        job = self.scheduler.enqueue(datetime.now(), say_hello, interval=5)
        # BUG FIX: assert on the persisted copy — the original fetched
        # job_from_queue but then asserted on the local ``job`` object,
        # leaving the fetched job unused and the persisted value unchecked.
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.result_ttl, -1)
class TestScheduler(RQTestCase): def setUp(self): super(TestScheduler, self).setUp() self.scheduler = Scheduler(connection=self.testconn) def test_acquire_lock(self): """ When scheduler acquires a lock, besides creating a key, it should also set an expiry that's a few seconds longer than it's polling interval so it automatically expires if scheduler is unexpectedly terminated. """ key = '%s_lock' % Scheduler.scheduler_key self.assertNotIn(key, tl(self.testconn.keys('*'))) scheduler = Scheduler(connection=self.testconn, interval=20) self.assertTrue(scheduler.acquire_lock()) self.assertIn(key, tl(self.testconn.keys('*'))) self.assertEqual(self.testconn.ttl(key), 30) scheduler.remove_lock() self.assertNotIn(key, tl(self.testconn.keys('*'))) def test_no_two_schedulers_acquire_lock(self): """ Ensure that no two schedulers can acquire the lock at the same time. When removing the lock, only the scheduler which originally acquired the lock can remove the lock. """ key = '%s_lock' % Scheduler.scheduler_key self.assertNotIn(key, tl(self.testconn.keys('*'))) scheduler1 = Scheduler(connection=self.testconn, interval=20) scheduler2 = Scheduler(connection=self.testconn, interval=20) self.assertTrue(scheduler1.acquire_lock()) self.assertFalse(scheduler2.acquire_lock()) self.assertIn(key, tl(self.testconn.keys('*'))) scheduler2.remove_lock() self.assertIn(key, tl(self.testconn.keys('*'))) scheduler1.remove_lock() self.assertNotIn(key, tl(self.testconn.keys('*'))) def test_create_job(self): """ Ensure that jobs are created properly. """ job = self.scheduler._create_job(say_hello, args=(), kwargs={}) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(job, job_from_queue) self.assertEqual(job_from_queue.func, say_hello) def test_create_job_with_ttl(self): """ Ensure that TTL is passed to RQ. 
""" job = self.scheduler._create_job(say_hello, ttl=2, args=(), kwargs={}) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(2, job_from_queue.ttl) def test_create_job_with_id(self): """ Ensure that ID is passed to RQ. """ job = self.scheduler._create_job(say_hello, id='id test', args=(), kwargs={}) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual('id test', job_from_queue.id) def test_create_job_with_description(self): """ Ensure that description is passed to RQ. """ job = self.scheduler._create_job(say_hello, description='description', args=(), kwargs={}) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual('description', job_from_queue.description) def test_create_job_with_timeout(self): """ Ensure that timeout is passed to RQ. """ timeout = 13 job = self.scheduler._create_job(say_hello, timeout=13, args=(), kwargs={}) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(timeout, job_from_queue.timeout) def test_job_not_persisted_if_commit_false(self): """ Ensure jobs are only saved to Redis if commit=True. 
""" job = self.scheduler._create_job(say_hello, commit=False) self.assertEqual(self.testconn.hgetall(job.key), {}) def test_create_scheduled_job(self): """ Ensure that scheduled jobs are put in the scheduler queue with the right score """ scheduled_time = datetime.utcnow() job = self.scheduler.enqueue_at(scheduled_time, say_hello) self.assertEqual(job, Job.fetch(job.id, connection=self.testconn)) self.assertIn(job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))) self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id), to_unix(scheduled_time)) def test_create_job_with_meta(self): """ Ensure that meta information on the job is passed to rq """ expected = {'say': 'hello'} job = self.scheduler._create_job(say_hello, meta=expected) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(expected, job_from_queue.meta) def test_enqueue_at_sets_timeout(self): """ Ensure that a job scheduled via enqueue_at can be created with a custom timeout. """ timeout = 13 job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, timeout=timeout) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(job_from_queue.timeout, timeout) def test_enqueue_at_sets_job_id(self): """ Ensure that a job scheduled via enqueue_at can be created with a custom job id. """ job_id = 'test_id' job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, job_id=job_id) self.assertEqual(job.id, job_id) def test_enqueue_at_sets_job_ttl(self): """ Ensure that a job scheduled via enqueue_at can be created with a custom job ttl. """ job_ttl = 123456789 job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, job_ttl=job_ttl) self.assertEqual(job.ttl, job_ttl) def test_enqueue_at_sets_job_result_ttl(self): """ Ensure that a job scheduled via enqueue_at can be created with a custom result ttl. 
""" job_result_ttl = 1234567890 job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, job_result_ttl=job_result_ttl) self.assertEqual(job.result_ttl, job_result_ttl) def test_enqueue_at_sets_meta(self): """ Ensure that a job scheduled via enqueue_at can be created with a custom meta. """ meta = {'say': 'hello'} job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, meta=meta) self.assertEqual(job.meta, meta) def test_enqueue_in(self): """ Ensure that jobs have the right scheduled time. """ right_now = datetime.utcnow() time_delta = timedelta(minutes=1) job = self.scheduler.enqueue_in(time_delta, say_hello) self.assertIn(job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))) self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id), to_unix(right_now + time_delta)) time_delta = timedelta(hours=1) job = self.scheduler.enqueue_in(time_delta, say_hello) self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id), to_unix(right_now + time_delta)) def test_enqueue_in_sets_timeout(self): """ Ensure that a job scheduled via enqueue_in can be created with a custom timeout. """ timeout = 13 job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, timeout=timeout) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(job_from_queue.timeout, timeout) def test_enqueue_in_sets_job_id(self): """ Ensure that a job scheduled via enqueue_in can be created with a custom job id. """ job_id = 'test_id' job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, job_id=job_id) self.assertEqual(job.id, job_id) def test_enqueue_in_sets_job_ttl(self): """ Ensure that a job scheduled via enqueue_in can be created with a custom job ttl. 
""" job_ttl = 123456789 job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, job_ttl=job_ttl) self.assertEqual(job.ttl, job_ttl) def test_enqueue_in_sets_job_result_ttl(self): """ Ensure that a job scheduled via enqueue_in can be created with a custom result ttl. """ job_result_ttl = 1234567890 job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, job_result_ttl=job_result_ttl) self.assertEqual(job.result_ttl, job_result_ttl) def test_enqueue_in_sets_meta(self): """ Ensure that a job scheduled via enqueue_in sets meta. """ meta = {'say': 'hello'} job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, meta=meta) self.assertEqual(job.meta, meta) def test_count(self): now = datetime.utcnow() self.scheduler.enqueue_at(now, say_hello) self.assertEqual(self.scheduler.count(), 1) future_time = now + timedelta(hours=1) future_test_time = now + timedelta(minutes=59, seconds=59) self.scheduler.enqueue_at(future_time, say_hello) self.assertEqual(self.scheduler.count(timedelta(minutes=59, seconds=59)), 1) self.assertEqual(self.scheduler.count(future_test_time), 1) self.assertEqual(self.scheduler.count(), 2) def test_get_jobs(self): """ Ensure get_jobs() returns all jobs until the specified time. """ now = datetime.utcnow() job = self.scheduler.enqueue_at(now, say_hello) self.assertIn(job, self.scheduler.get_jobs(now)) future_time = now + timedelta(hours=1) job = self.scheduler.enqueue_at(future_time, say_hello) self.assertIn(job, self.scheduler.get_jobs(timedelta(hours=1, seconds=1))) self.assertIn(job, [j[0] for j in self.scheduler.get_jobs(with_times=True)]) self.assertIsInstance(list(self.scheduler.get_jobs(with_times=True))[0][1], datetime) self.assertNotIn(job, self.scheduler.get_jobs(timedelta(minutes=59, seconds=59))) def test_get_jobs_slice(self): """ Ensure get_jobs() returns the appropriate slice of all jobs using offset and length. 
""" now = datetime.utcnow() future_time = now + timedelta(hours=1) future_test_time = now + timedelta(minutes=59, seconds=59) # Schedule each job a second later than the previous job, # otherwise Redis will return jobs that have the same scheduled time in # lexicographical order (not the order in which we enqueued them) now_jobs = [self.scheduler.enqueue_at(now + timedelta(seconds=x), say_hello) for x in range(15)] future_jobs = [self.scheduler.enqueue_at(future_time + timedelta(seconds=x), say_hello) for x in range(15)] expected_slice = now_jobs[5:] + future_jobs[:10] # last 10 from now_jobs and first 10 from future_jobs expected_until_slice = now_jobs[5:] # last 10 from now_jobs jobs = self.scheduler.get_jobs() jobs_slice = self.scheduler.get_jobs(offset=5, length=20) jobs_until_slice = self.scheduler.get_jobs(future_test_time, offset=5, length=20) self.assertEqual(now_jobs + future_jobs, list(jobs)) self.assertEqual(expected_slice, list(jobs_slice)) self.assertEqual(expected_until_slice, list(jobs_until_slice)) def test_get_jobs_to_queue(self): """ Ensure that jobs scheduled the future are not queued. 
""" now = datetime.utcnow() job = self.scheduler.enqueue_at(now, say_hello) self.assertIn(job, self.scheduler.get_jobs_to_queue()) future_time = now + timedelta(hours=1) job = self.scheduler.enqueue_at(future_time, say_hello) self.assertNotIn(job, self.scheduler.get_jobs_to_queue()) def test_enqueue_job(self): """ When scheduled job is enqueued, make sure: - Job is removed from the sorted set of scheduled jobs - "enqueued_at" attribute is properly set - Job appears in the right queue - Queue is recognized by rq's Queue.all() """ now = datetime.utcnow() queue_name = 'foo' scheduler = Scheduler(connection=self.testconn, queue_name=queue_name) job = scheduler.enqueue_at(now, say_hello) self.scheduler.enqueue_job(job) self.assertNotIn(job, tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10))) job = Job.fetch(job.id, connection=self.testconn) self.assertTrue(job.enqueued_at is not None) queue = scheduler.get_queue_for_job(job) self.assertIn(job, queue.jobs) queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name)) self.assertIn(job, queue.jobs) self.assertIn(queue, Queue.all()) def test_enqueue_job_with_queue(self): """ Ensure that job is enqueued correctly when the scheduler is bound to a queue object. 
""" queue = Queue('foo', connection=self.testconn) scheduler = Scheduler(connection=self.testconn, queue=queue) job = scheduler._create_job(say_hello) scheduler_queue = scheduler.get_queue_for_job(job) self.assertEqual(queue, scheduler_queue) scheduler.enqueue_job(job) self.assertTrue(job.enqueued_at is not None) self.assertIn(job, queue.jobs) self.assertIn(queue, Queue.all()) def test_job_membership(self): now = datetime.utcnow() job = self.scheduler.enqueue_at(now, say_hello) self.assertIn(job, self.scheduler) self.assertIn(job.id, self.scheduler) self.assertNotIn("non-existing-job-id", self.scheduler) def test_cancel_scheduled_job(self): """ When scheduled job is canceled, make sure: - Job is removed from the sorted set of scheduled jobs """ # schedule a job to be enqueued one minute from now time_delta = timedelta(minutes=1) job = self.scheduler.enqueue_in(time_delta, say_hello) # cancel the scheduled job and check that it's gone from the set self.scheduler.cancel(job) self.assertNotIn(job.id, tl(self.testconn.zrange( self.scheduler.scheduled_jobs_key, 0, 1))) def test_change_execution_time(self): """ Ensure ``change_execution_time`` is called, ensure that job's score is updated """ job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello) new_date = datetime(2010, 1, 1) self.scheduler.change_execution_time(job, new_date) self.assertEqual(to_unix(new_date), self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)) self.scheduler.cancel(job) self.assertRaises(ValueError, self.scheduler.change_execution_time, job, new_date) def test_args_kwargs_are_passed_correctly(self): """ Ensure that arguments and keyword arguments are properly saved to jobs. 
""" job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, 1, 1) self.assertEqual(job.args, (1, 1, 1)) job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, z=1, y=1, x=1) self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1}) job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, z=1, y=1) self.assertEqual(job.kwargs, {'y': 1, 'z': 1}) self.assertEqual(job.args, (1,)) time_delta = timedelta(minutes=1) job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, 1, 1) self.assertEqual(job.args, (1, 1, 1)) job = self.scheduler.enqueue_in(time_delta, simple_addition, z=1, y=1, x=1) self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1}) job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, z=1, y=1) self.assertEqual(job.kwargs, {'y': 1, 'z': 1}) self.assertEqual(job.args, (1,)) def test_interval_and_repeat_persisted_correctly(self): """ Ensure that interval and repeat attributes are correctly saved. """ job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=10, repeat=11) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(job_from_queue.meta['interval'], 10) self.assertEqual(job_from_queue.meta['repeat'], 11) def test_crontab_persisted_correctly(self): """ Ensure that crontab attribute gets correctly saved in Redis. 
""" # create a job that runs one minute past each whole hour job = self.scheduler.cron("1 * * * *", say_hello) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(job_from_queue.meta['cron_string'], "1 * * * *") # get the scheduled_time and convert it to a datetime object unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id) datetime_time = from_unix(unix_time) # check that minute=1, seconds=0, and is within an hour assert datetime_time.minute == 1 assert datetime_time.second == 0 assert datetime_time - datetime.utcnow() < timedelta(hours=1) def test_crontab_sets_timeout(self): """ Ensure that a job scheduled via crontab can be created with a custom timeout. """ timeout = 13 job = self.scheduler.cron("1 * * * *", say_hello, timeout=timeout) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(job_from_queue.timeout, timeout) def test_crontab_sets_id(self): """ Ensure that a job scheduled via crontab can be created with a custom id """ job_id = "hello-job-id" job = self.scheduler.cron("1 * * * *", say_hello, id=job_id) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(job_id, job_from_queue.id) def test_crontab_sets_default_result_ttl(self): """ Ensure that a job scheduled via crontab gets proper default result_ttl (-1) periodic tasks. 
""" job = self.scheduler.cron("1 * * * *", say_hello) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(-1, job_from_queue.result_ttl) def test_crontab_sets_description(self): """ Ensure that a job scheduled via crontab can be created with a custom description """ description = 'test description' job = self.scheduler.cron("1 * * * *", say_hello, description=description) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(description, job_from_queue.description) def test_repeat_without_interval_raises_error(self): # Ensure that an error is raised if repeat is specified without interval def create_job(): self.scheduler.schedule(datetime.utcnow(), say_hello, repeat=11) self.assertRaises(ValueError, create_job) def test_job_with_intervals_get_rescheduled(self): """ Ensure jobs with interval attribute are put back in the scheduler """ time_now = datetime.utcnow() interval = 10 job = self.scheduler.schedule(time_now, say_hello, interval=interval) self.scheduler.enqueue_job(job) self.assertIn(job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))) self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id), to_unix(time_now) + interval) def test_job_with_interval_can_set_meta(self): """ Ensure that jobs with interval attribute can be created with meta """ time_now = datetime.utcnow() interval = 10 meta = {'say': 'hello'} job = self.scheduler.schedule(time_now, say_hello, interval=interval, meta=meta) self.scheduler.enqueue_job(job) self.assertEqual(job.meta, meta) def test_job_with_crontab_get_rescheduled(self): # Create a job with a cronjob_string job = self.scheduler.cron("1 * * * *", say_hello) # current unix_time old_next_scheduled_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id) # change crontab job.meta['cron_string'] = "2 * * * *" # enqueue the job self.scheduler.enqueue_job(job) self.assertIn(job.id, 
tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))) # check that next scheduled time has changed self.assertNotEqual(old_next_scheduled_time, self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)) # check that new next scheduled time is set correctly expected_next_scheduled_time = to_unix(get_next_scheduled_time("2 * * * *")) self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id), expected_next_scheduled_time) def test_job_with_repeat(self): """ Ensure jobs with repeat attribute are put back in the scheduler X (repeat) number of times """ time_now = datetime.utcnow() interval = 10 # If job is repeated once, the job shouldn't be put back in the queue job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=1) self.scheduler.enqueue_job(job) self.assertNotIn(job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))) # If job is repeated twice, it should only be put back in the queue once job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=2) self.scheduler.enqueue_job(job) self.assertIn(job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))) self.scheduler.enqueue_job(job) self.assertNotIn(job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))) def test_missing_jobs_removed_from_scheduler(self): """ Ensure jobs that don't exist when queued are removed from the scheduler. """ job = self.scheduler.schedule(datetime.utcnow(), say_hello) job.cancel() list(self.scheduler.get_jobs_to_queue()) self.assertIn(job.id, tl(self.testconn.zrange( self.scheduler.scheduled_jobs_key, 0, 1))) job.delete() list(self.scheduler.get_jobs_to_queue()) self.assertNotIn(job.id, tl(self.testconn.zrange( self.scheduler.scheduled_jobs_key, 0, 1))) def test_periodic_jobs_sets_result_ttl(self): """ Ensure periodic jobs set result_ttl to infinite. 
""" job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(job.result_ttl, -1) def test_periodic_jobs_sets_ttl(self): """ Ensure periodic jobs sets correctly ttl. """ job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, ttl=4) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(job.ttl, 4) def test_periodic_jobs_sets_meta(self): """ Ensure periodic jobs sets correctly meta. """ meta = {'say': 'hello'} job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, meta=meta) self.assertEqual(meta, job.meta) def test_periodic_job_sets_id(self): """ Ensure that ID is passed to RQ by schedule. """ job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, id='id test') job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual('id test', job.id) def test_periodic_job_sets_description(self): """ Ensure that description is passed to RQ by schedule. """ job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, description='description') job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual('description', job.description) def test_run(self): """ Check correct signal handling in Scheduler.run(). """ def send_stop_signal(): """ Sleep for 1 second, then send a INT signal to ourself, so the signal handler installed by scheduler.run() is called. """ time.sleep(1) os.kill(os.getpid(), signal.SIGINT) thread = Thread(target=send_stop_signal) thread.start() self.assertRaises(SystemExit, self.scheduler.run) thread.join() def test_run_burst(self): """ Check burst mode of Scheduler.run(). 
""" now = datetime.utcnow() job = self.scheduler.enqueue_at(now, say_hello) self.assertIn(job, self.scheduler.get_jobs_to_queue()) self.assertEqual(len(list(self.scheduler.get_jobs())), 1) self.scheduler.run(burst=True) self.assertEqual(len(list(self.scheduler.get_jobs())), 0) def test_scheduler_w_o_explicit_connection(self): """ Ensure instantiating Scheduler w/o explicit connection works. """ s = Scheduler() self.assertEqual(s.connection, self.testconn) def test_small_float_interval(self): """ Test that scheduler accepts 'interval' of type float, less than 1 second. """ key = Scheduler.scheduler_key lock_key = '%s_lock' % Scheduler.scheduler_key self.assertNotIn(key, tl(self.testconn.keys('*'))) scheduler = Scheduler(connection=self.testconn, interval=0.1) # testing interval = 0.1 second self.assertEqual(scheduler._interval, 0.1) #acquire lock self.assertTrue(scheduler.acquire_lock()) self.assertIn(lock_key, tl(self.testconn.keys('*'))) self.assertEqual(self.testconn.ttl(lock_key), 10) # int(0.1) + 10 = 10 #enqueue a job now = datetime.utcnow() job = scheduler.enqueue_at(now, say_hello) self.assertIn(job, self.scheduler.get_jobs_to_queue()) self.assertEqual(len(list(self.scheduler.get_jobs())), 1) #remove the lock scheduler.remove_lock() #test that run works with the small floating-point interval def send_stop_signal(): """ Sleep for 1 second, then send a INT signal to ourself, so the signal handler installed by scheduler.run() is called. """ time.sleep(1) os.kill(os.getpid(), signal.SIGINT) thread = Thread(target=send_stop_signal) thread.start() self.assertRaises(SystemExit, scheduler.run) thread.join() #all jobs must have been scheduled during 1 second self.assertEqual(len(list(scheduler.get_jobs())), 0)
class RedisWorker(object):
    """Dispatch collector jobs onto RQ, immediately or on a schedule.

    Immediate jobs go to the "default" queue; scheduled / periodic jobs go
    through an rq-scheduler instance bound to the "high" queue.
    """

    # Per-job timeout (seconds) applied to everything enqueued or scheduled.
    TIMEOUT = settings.JOB_TIMEOUT
    # Repetition frequencies accepted by schedule_periodically().
    FREQUENCES = ["minutely", "hourly", "daily", "weekly", "monthly", "yearly"]

    def __init__(self):
        # Imported lazily so merely importing this module does not require
        # rq/redis to be installed.
        import rq
        import redis
        from rq_scheduler import Scheduler

        self.conn = redis.from_url(settings.REDIS_URL)
        self.queue = rq.Queue("default", connection=self.conn,
                              default_timeout=RedisWorker.TIMEOUT)
        self.scheduler = Scheduler("high", connection=self.conn)
        rq.use_connection(self.conn)

    def run(self, collector, **kwargs):
        """Enqueue *collector* for immediate execution.

        Refuses to enqueue (and emits a warning) when the queue already
        holds 20 or more jobs, to keep the backlog bounded.
        """
        class_name = "%s.%s" % (collector.__class__.__module__,
                                collector.__class__.__name__)
        collector_params = collector.get_params()
        if len(self.queue.all()) >= 20:
            warning("More than 20 jobs in the queue")
            return
        self.queue.enqueue(collector.run, collector=class_name,
                           params=collector_params, **kwargs)

    def schedule_with_interval(self, date, interval_s, collector, *arg, **kwargs):
        """Schedule *collector* to run every *interval_s* seconds, forever.

        *date* is the first execution time and defaults to now.
        Returns the scheduled rq job.
        """
        date = date or datetime.datetime.now()
        kwargs.update(
            {
                "collector": "%s.%s" % (collector.__class__.__module__,
                                        collector.__class__.__name__),
                "params": collector.get_params(),
            }
        )
        res = self.scheduler.schedule(
            scheduled_time=date,          # Time for first execution
            func=collector.run,           # Function to be queued
            args=arg,                     # Arguments passed into function when executed
            kwargs=kwargs,                # Keyword arguments passed into function when executed
            interval=interval_s,          # Time before the function is called again, in seconds
            repeat=None,                  # Repeat this number of times (None means repeat forever)
            timeout=RedisWorker.TIMEOUT,
        )
        return res

    def schedule_periodically(self, date, frequence, collector, *arg, **kwargs):
        """Schedule *collector* at *date*; a wrapper re-queues it every *frequence*."""
        from brokenpromises.worker import RunAndReplaceIntTheQueuePeriodically
        assert frequence in RedisWorker.FREQUENCES, "frequence %s unknown." % (frequence)
        if frequence == "minutely":
            next_date = date + datetime.timedelta(minutes=1)
        elif frequence == "hourly":
            next_date = date + datetime.timedelta(hours=1)
        elif frequence == "daily":
            next_date = date + datetime.timedelta(days=1)
        elif frequence == "weekly":
            next_date = date + datetime.timedelta(weeks=1)
        elif frequence == "monthly":
            # Roll over to the next calendar month, clamping the day to the
            # last day of that month (e.g. Jan 31 -> Feb 28).
            # BUGFIX: the original computed `date.year + (date.month + 1) / 12`,
            # which is float division on Python 3 (datetime() then raises) and,
            # even as integer division, incremented the year one month early
            # (November rolled to December of the *next* year).
            year = date.year + date.month // 12
            month = date.month % 12 + 1
            day = min(date.day, calendar.monthrange(year, month)[1])
            next_date = datetime.datetime(year, month, day,
                                          date.hour, date.minute, date.second)
        elif frequence == "yearly":
            year = date.year + 1
            day = min(date.day, calendar.monthrange(year, date.month)[1])
            next_date = datetime.datetime(year, date.month, day,
                                          date.hour, date.minute, date.second)
        # Schedule in a wrapper which will requeue the job after
        self.schedule(
            date,
            RunAndReplaceIntTheQueuePeriodically(next_date, frequence, collector),
            *arg, **kwargs)

    def schedule(self, date, collector, *arg, **kwargs):
        """Schedule *collector* once: after a timedelta or at a datetime.

        Returns the scheduled rq job, or None when *date* is neither a
        datetime.timedelta nor a datetime.datetime.
        """
        res = None
        kwargs.update(
            {
                "collector": "%s.%s" % (collector.__class__.__module__,
                                        collector.__class__.__name__),
                "params": collector.get_params(),
            }
        )
        if type(date) is datetime.timedelta:
            res = self.scheduler.enqueue_in(date, collector.run, *arg, **kwargs)
        elif type(date) is datetime.datetime:
            res = self.scheduler.enqueue_at(date, collector.run, *arg, **kwargs)
        return res
from datetime import datetime, timedelta

from django_rq import job

from .models import Notification

from redis import Redis
from rq_scheduler import Scheduler


@job("default")
def send_notifications():
    """Send every pending notification email and flag the sent ones.

    Iterates over notifications with ``is_send=False``; each successful
    send marks the row as sent (persisting only that field).
    Returns True so the RQ job result records completion.
    """
    notifications = Notification.objects.filter(is_send=False)
    for notification in notifications:
        result = send_email(notification.email, notification.email_content)
        if result is True:
            notification.is_send = True
            notification.save(update_fields=['is_send'])
    return True


# Kick off one immediate run, then schedule a second run 5 seconds out.
send_notifications.delay()

redis_conn = Redis()
scheduler = Scheduler(connection=redis_conn)
# scheduler = django_rq.get_scheduler('default')
# BUGFIX: the scheduled job used to be bound to the name ``job``, shadowing
# the ``job`` decorator imported from django_rq above.
scheduled_job = scheduler.enqueue_in(timedelta(seconds=5), send_notifications)


def send_email(email, content):
    # Stub: pretend delivery always succeeds.
    return True
# @desc: demo of scheduling tasks on three rq-scheduler queues.
from redis import Redis
from rq import Queue
from rq_scheduler import Scheduler
from datetime import datetime, timedelta
from rqtest.rq_test import redis_conn
from rqtest.worker import task2, task4, task3, task1


class A(object):
    """Trivial payload object passed as a task argument."""

    def __init__(self):
        # Fixed demo name; tasks receiving this instance can read it.
        self.name = 'aniu'


# One scheduler per target queue: default, "low" and "high".
scheduler = Scheduler(connection=redis_conn)
scheduler1 = Scheduler(queue_name='low', connection=redis_conn)
scheduler2 = Scheduler(queue_name='high', connection=redis_conn)
a = A()

if __name__ == "__main__":
    print('a = ', a.name)
    # Stagger three tasks at 5-second intervals across the queues.
    for sched, delay_s, fn, payload in (
            (scheduler1, 5, task1, a),
            (scheduler2, 10, task3, '叁大爷好'),
            (scheduler, 15, task4, '撕大爷好')):
        sched.enqueue_in(timedelta(seconds=delay_s), fn, payload)
    if 0:  # disabled: flip to 1 to cancel everything on the "high" queue
        for pending in scheduler2.get_jobs():
            scheduler2.cancel(pending)
            print('2取消任务, 任务名={0!r}'.format(pending))
def monitor_job(task_id, job_id, execution_id):
    """Check a job execution's container state and react to it.

    - Still created/running: enforce the job's timeout; otherwise re-schedule
      this monitor to run again shortly and report success.
    - Exited non-zero with retries remaining: enqueue a retry with
      exponential backoff, then fall through to record the failed execution.
    - Otherwise: persist the execution outcome (status, log, error).

    Returns True when the job finished or monitoring was re-scheduled,
    False when the job is missing or the execution timed out.
    Re-raises any unexpected error after reporting it.
    """
    # BUGFIX: ``logger`` used to be created only inside the try block, after
    # several calls that can raise; the except handler then hit an unbound
    # name and masked the real error. Pre-bind a sentinel and guard its use.
    logger = None
    try:
        app = current_app
        executor = app.load_executor()

        job = Job.get_by_id(task_id, job_id)
        logger = app.logger.bind(task_id=task_id, job_id=job_id)

        if job is None:
            logger.error("Failed to retrieve task or job.")
            return False

        execution = job.get_execution_by_id(execution_id)
        result = executor.get_result(job.task, job, execution)
        logger.info(
            "Container result obtained.",
            container_status=result.status,
            container_exit_code=result.exit_code,
        )

        if result.status in (
                ExecutionResult.Status.created,
                ExecutionResult.Status.running,
        ):
            elapsed = (datetime.utcnow() - execution.started_at).total_seconds()
            if elapsed > job.metadata["timeout"]:
                execution.finished_at = datetime.utcnow()
                execution.status = JobExecution.Status.timedout
                execution.error = f"Job execution timed out after {elapsed} seconds."
                executor.stop_job(job.task, job, execution)
                logger.debug(
                    "Job execution timed out. Storing job details in mongo db.",
                    status=execution.status,
                    # NOTE: misspelled log key kept for log-schema compatibility.
                    ellapsed=elapsed,
                    error=result.error,
                )
                job.save()
                logger.info("Job execution timed out.", status=execution.status)
                return False

            retry_in_seconds = 5
            scheduler = Scheduler("monitor", connection=app.redis)
            logger.info(
                "Job has not finished. Retrying monitoring in the future.",
                container_status=result.status,
                # BUGFIX: this used to log seconds=1 while actually
                # re-scheduling 5 seconds out.
                seconds=retry_in_seconds,
            )

            scheduler.enqueue_in(timedelta(seconds=retry_in_seconds),
                                 monitor_job, task_id, job_id, execution_id)
            return True

        if (result.exit_code != 0
                and "retry_count" in job.metadata
                and job.metadata["retry_count"] < job.metadata["retries"]):
            retry_logger = logger.bind(
                exit_code=result.exit_code,
                retry_count=job.metadata["retry_count"],
                retries=job.metadata["retries"],
            )
            retry_logger.debug("Job failed. Enqueuing job retry...")
            job.metadata["retry_count"] += 1

            scheduler = Scheduler("jobs", connection=current_app.redis)
            args = [task_id, job_id, execution.image, execution.command]

            # Exponential backoff: min_backoff * factor ** retry_count.
            factor = app.config["EXPONENTIAL_BACKOFF_FACTOR"]
            min_backoff = app.config["EXPONENTIAL_BACKOFF_MIN_MS"] / 1000.0
            delta = timedelta(seconds=min_backoff)
            if job.metadata["retries"] > 0:
                delta = timedelta(
                    seconds=math.pow(factor, job.metadata["retry_count"]) * min_backoff)

            dt = datetime.utcnow() + delta
            enqueued = scheduler.enqueue_at(dt, run_job, *args)
            job.metadata["enqueued_id"] = enqueued.id
            job.save()
            retry_logger.info("Job execution enqueued successfully.")
            # still need to finish current execution as the retry
            # will be a new execution

        execution.finished_at = datetime.utcnow()
        execution.exit_code = result.exit_code
        execution.status = (JobExecution.Status.done
                            if execution.exit_code == 0
                            else JobExecution.Status.failed)
        execution.log = result.log.decode("utf-8")
        execution.error = result.error.decode("utf-8")

        logger.debug(
            "Job finished. Storing job details in mongo db.",
            status=execution.status,
            log=result.log,
            error=result.error,
        )
        job.save()
        logger.info("Job details stored in mongo db.", status=execution.status)
        return True
    except Exception as err:
        if logger is not None:
            logger.error("Failed to monitor job", error=err)
        current_app.report_error(
            err,
            metadata=dict(
                operation="Monitoring Job",
                task_id=task_id,
                job_id=job_id,
                execution_id=execution_id,
            ),
        )
        raise err
class RedisWorker(object):
    """Dispatch collector jobs onto RQ, immediately or on a schedule.

    Immediate jobs land on the "default" queue; scheduled and periodic
    jobs are handled by an rq-scheduler bound to the "high" queue.
    """

    # Per-job timeout (seconds) for everything enqueued or scheduled.
    TIMEOUT = settings.JOB_TIMEOUT
    # Frequencies understood by schedule_periodically().
    FREQUENCES = ["minutely", "hourly", "daily", "weekly", "monthly", "yearly"]

    def __init__(self):
        # Lazy imports: the module stays importable without rq/redis.
        import rq
        import redis
        from rq_scheduler import Scheduler
        self.conn = redis.from_url(settings.REDIS_URL)
        self.queue = rq.Queue("default", connection=self.conn,
                              default_timeout=RedisWorker.TIMEOUT)
        self.scheduler = Scheduler("high", connection=self.conn)
        rq.use_connection(self.conn)

    def run(self, collector, **kwargs):
        """Enqueue *collector* for immediate execution.

        Skips enqueueing (with a warning) once the queue holds 20+ jobs.
        """
        class_name = "%s.%s" % (collector.__class__.__module__,
                                collector.__class__.__name__)
        collector_params = collector.get_params()
        if len(self.queue.all()) >= 20:
            warning("More than 20 jobs in the queue")
            return
        self.queue.enqueue(collector.run, collector=class_name,
                           params=collector_params, **kwargs)

    def schedule_with_interval(self, date, interval_s, collector, *arg, **kwargs):
        """Schedule *collector* every *interval_s* seconds starting at *date*.

        *date* defaults to now; returns the scheduled rq job.
        """
        date = date or datetime.datetime.now()
        kwargs.update({
            "collector": "%s.%s" % (collector.__class__.__module__,
                                    collector.__class__.__name__),
            "params": collector.get_params()
        })
        res = self.scheduler.schedule(
            scheduled_time=date,    # Time for first execution
            func=collector.run,     # Function to be queued
            args=arg,               # Arguments passed into function when executed
            kwargs=kwargs,          # Keyword arguments passed into function when executed
            interval=interval_s,    # Time before the function is called again, in seconds
            repeat=None,            # Repeat this number of times (None means repeat forever)
            timeout=RedisWorker.TIMEOUT)
        return res

    def schedule_periodically(self, date, frequence, collector, *arg, **kwargs):
        """Schedule *collector* at *date*; a wrapper re-queues it every *frequence*."""
        from brokenpromises.worker import RunAndReplaceIntTheQueuePeriodically
        assert frequence in RedisWorker.FREQUENCES, "frequence %s unknown." % (frequence)
        if frequence == "minutely":
            next_date = date + datetime.timedelta(minutes=1)
        elif frequence == "hourly":
            next_date = date + datetime.timedelta(hours=1)
        elif frequence == "daily":
            next_date = date + datetime.timedelta(days=1)
        elif frequence == "weekly":
            next_date = date + datetime.timedelta(weeks=1)
        elif frequence == "monthly":
            # Advance to the next calendar month, clamping the day to that
            # month's length (Jan 31 -> Feb 28).
            # BUGFIX: `(date.month + 1) / 12` was float division under
            # Python 3 (datetime() then raises) and rolled the year over one
            # month too early (November mapped into the next year).
            year = date.year + date.month // 12
            month = date.month % 12 + 1
            day = min(date.day, calendar.monthrange(year, month)[1])
            next_date = datetime.datetime(year, month, day, date.hour,
                                          date.minute, date.second)
        elif frequence == "yearly":
            year = date.year + 1
            day = min(date.day, calendar.monthrange(year, date.month)[1])
            next_date = datetime.datetime(year, date.month, day, date.hour,
                                          date.minute, date.second)
        # Schedule in a wrapper which will requeue the job after
        self.schedule(
            date,
            RunAndReplaceIntTheQueuePeriodically(next_date, frequence, collector),
            *arg, **kwargs)

    def schedule(self, date, collector, *arg, **kwargs):
        """Schedule *collector* once (timedelta => enqueue_in, datetime => enqueue_at).

        Returns the scheduled rq job, or None for any other *date* type.
        """
        res = None
        kwargs.update({
            "collector": "%s.%s" % (collector.__class__.__module__,
                                    collector.__class__.__name__),
            "params": collector.get_params()
        })
        if type(date) is datetime.timedelta:
            res = self.scheduler.enqueue_in(date, collector.run, *arg, **kwargs)
        elif type(date) is datetime.datetime:
            res = self.scheduler.enqueue_at(date, collector.run, *arg, **kwargs)
        return res
class TestScheduler(RQTestCase): def setUp(self): super(TestScheduler, self).setUp() self.scheduler = Scheduler(connection=self.testconn) def test_acquire_lock(self): """ When scheduler acquires a lock, besides creating a key, it should also set an expiry that's a few seconds longer than it's polling interval so it automatically expires if scheduler is unexpectedly terminated. """ key = '%s_lock' % Scheduler.scheduler_key self.assertNotIn(key, tl(self.testconn.keys('*'))) scheduler = Scheduler(connection=self.testconn, interval=20) self.assertTrue(scheduler.acquire_lock()) self.assertIn(key, tl(self.testconn.keys('*'))) self.assertEqual(self.testconn.ttl(key), 30) scheduler.remove_lock() self.assertNotIn(key, tl(self.testconn.keys('*'))) def test_no_two_schedulers_acquire_lock(self): """ Ensure that no two schedulers can acquire the lock at the same time. When removing the lock, only the scheduler which originally acquired the lock can remove the lock. """ key = '%s_lock' % Scheduler.scheduler_key self.assertNotIn(key, tl(self.testconn.keys('*'))) scheduler1 = Scheduler(connection=self.testconn, interval=20) scheduler2 = Scheduler(connection=self.testconn, interval=20) self.assertTrue(scheduler1.acquire_lock()) self.assertFalse(scheduler2.acquire_lock()) self.assertIn(key, tl(self.testconn.keys('*'))) scheduler2.remove_lock() self.assertIn(key, tl(self.testconn.keys('*'))) scheduler1.remove_lock() self.assertNotIn(key, tl(self.testconn.keys('*'))) def test_create_job(self): """ Ensure that jobs are created properly. """ job = self.scheduler._create_job(say_hello, args=(), kwargs={}) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(job, job_from_queue) self.assertEqual(job_from_queue.func, say_hello) def test_create_job_with_ttl(self): """ Ensure that TTL is passed to RQ. 
""" job = self.scheduler._create_job(say_hello, ttl=2, args=(), kwargs={}) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(2, job_from_queue.ttl) def test_create_job_with_id(self): """ Ensure that ID is passed to RQ. """ job = self.scheduler._create_job(say_hello, id='id test', args=(), kwargs={}) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual('id test', job_from_queue.id) def test_create_job_with_description(self): """ Ensure that description is passed to RQ. """ job = self.scheduler._create_job(say_hello, description='description', args=(), kwargs={}) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual('description', job_from_queue.description) def test_create_job_with_timeout(self): """ Ensure that timeout is passed to RQ. """ timeout = 13 job = self.scheduler._create_job(say_hello, timeout=13, args=(), kwargs={}) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(timeout, job_from_queue.timeout) def test_job_not_persisted_if_commit_false(self): """ Ensure jobs are only saved to Redis if commit=True. 
""" job = self.scheduler._create_job(say_hello, commit=False) self.assertEqual(self.testconn.hgetall(job.key), {}) def test_create_scheduled_job(self): """ Ensure that scheduled jobs are put in the scheduler queue with the right score """ scheduled_time = datetime.utcnow() job = self.scheduler.enqueue_at(scheduled_time, say_hello) self.assertEqual(job, Job.fetch(job.id, connection=self.testconn)) self.assertIn( job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))) self.assertEqual( self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id), to_unix(scheduled_time)) def test_create_job_with_meta(self): """ Ensure that meta information on the job is passed to rq """ expected = {'say': 'hello'} job = self.scheduler._create_job(say_hello, meta=expected) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(expected, job_from_queue.meta) def test_enqueue_at_sets_timeout(self): """ Ensure that a job scheduled via enqueue_at can be created with a custom timeout. """ timeout = 13 job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, timeout=timeout) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(job_from_queue.timeout, timeout) def test_enqueue_at_sets_job_id(self): """ Ensure that a job scheduled via enqueue_at can be created with a custom job id. """ job_id = 'test_id' job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, job_id=job_id) self.assertEqual(job.id, job_id) def test_enqueue_at_sets_job_ttl(self): """ Ensure that a job scheduled via enqueue_at can be created with a custom job ttl. """ job_ttl = 123456789 job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, job_ttl=job_ttl) self.assertEqual(job.ttl, job_ttl) def test_enqueue_at_sets_job_result_ttl(self): """ Ensure that a job scheduled via enqueue_at can be created with a custom result ttl. 
""" job_result_ttl = 1234567890 job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, job_result_ttl=job_result_ttl) self.assertEqual(job.result_ttl, job_result_ttl) def test_enqueue_at_sets_meta(self): """ Ensure that a job scheduled via enqueue_at can be created with a custom meta. """ meta = {'say': 'hello'} job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, meta=meta) self.assertEqual(job.meta, meta) def test_enqueue_in(self): """ Ensure that jobs have the right scheduled time. """ right_now = datetime.utcnow() time_delta = timedelta(minutes=1) job = self.scheduler.enqueue_in(time_delta, say_hello) self.assertIn( job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))) self.assertEqual( self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id), to_unix(right_now + time_delta)) time_delta = timedelta(hours=1) job = self.scheduler.enqueue_in(time_delta, say_hello) self.assertEqual( self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id), to_unix(right_now + time_delta)) def test_enqueue_in_sets_timeout(self): """ Ensure that a job scheduled via enqueue_in can be created with a custom timeout. """ timeout = 13 job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, timeout=timeout) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(job_from_queue.timeout, timeout) def test_enqueue_in_sets_job_id(self): """ Ensure that a job scheduled via enqueue_in can be created with a custom job id. """ job_id = 'test_id' job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, job_id=job_id) self.assertEqual(job.id, job_id) def test_enqueue_in_sets_job_ttl(self): """ Ensure that a job scheduled via enqueue_in can be created with a custom job ttl. 
""" job_ttl = 123456789 job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, job_ttl=job_ttl) self.assertEqual(job.ttl, job_ttl) def test_enqueue_in_sets_job_result_ttl(self): """ Ensure that a job scheduled via enqueue_in can be created with a custom result ttl. """ job_result_ttl = 1234567890 job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, job_result_ttl=job_result_ttl) self.assertEqual(job.result_ttl, job_result_ttl) def test_enqueue_in_sets_meta(self): """ Ensure that a job scheduled via enqueue_in sets meta. """ meta = {'say': 'hello'} job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, meta=meta) self.assertEqual(job.meta, meta) def test_count(self): now = datetime.utcnow() self.scheduler.enqueue_at(now, say_hello) self.assertEqual(self.scheduler.count(), 1) future_time = now + timedelta(hours=1) future_test_time = now + timedelta(minutes=59, seconds=59) self.scheduler.enqueue_at(future_time, say_hello) self.assertEqual( self.scheduler.count(timedelta(minutes=59, seconds=59)), 1) self.assertEqual(self.scheduler.count(future_test_time), 1) self.assertEqual(self.scheduler.count(), 2) def test_get_jobs(self): """ Ensure get_jobs() returns all jobs until the specified time. """ now = datetime.utcnow() job = self.scheduler.enqueue_at(now, say_hello) self.assertIn(job, self.scheduler.get_jobs(now)) future_time = now + timedelta(hours=1) job = self.scheduler.enqueue_at(future_time, say_hello) self.assertIn(job, self.scheduler.get_jobs(timedelta(hours=1, seconds=1))) self.assertIn(job, [j[0] for j in self.scheduler.get_jobs(with_times=True)]) self.assertIsInstance( list(self.scheduler.get_jobs(with_times=True))[0][1], datetime) self.assertNotIn( job, self.scheduler.get_jobs(timedelta(minutes=59, seconds=59))) def test_get_jobs_slice(self): """ Ensure get_jobs() returns the appropriate slice of all jobs using offset and length. 
""" now = datetime.utcnow() future_time = now + timedelta(hours=1) future_test_time = now + timedelta(minutes=59, seconds=59) # Schedule each job a second later than the previous job, # otherwise Redis will return jobs that have the same scheduled time in # lexicographical order (not the order in which we enqueued them) now_jobs = [ self.scheduler.enqueue_at(now + timedelta(seconds=x), say_hello) for x in range(15) ] future_jobs = [ self.scheduler.enqueue_at(future_time + timedelta(seconds=x), say_hello) for x in range(15) ] expected_slice = now_jobs[ 5:] + future_jobs[: 10] # last 10 from now_jobs and first 10 from future_jobs expected_until_slice = now_jobs[5:] # last 10 from now_jobs jobs = self.scheduler.get_jobs() jobs_slice = self.scheduler.get_jobs(offset=5, length=20) jobs_until_slice = self.scheduler.get_jobs(future_test_time, offset=5, length=20) self.assertEqual(now_jobs + future_jobs, list(jobs)) self.assertEqual(expected_slice, list(jobs_slice)) self.assertEqual(expected_until_slice, list(jobs_until_slice)) def test_get_jobs_to_queue(self): """ Ensure that jobs scheduled the future are not queued. 
""" now = datetime.utcnow() job = self.scheduler.enqueue_at(now, say_hello) self.assertIn(job, self.scheduler.get_jobs_to_queue()) future_time = now + timedelta(hours=1) job = self.scheduler.enqueue_at(future_time, say_hello) self.assertNotIn(job, self.scheduler.get_jobs_to_queue()) def test_enqueue_job(self): """ When scheduled job is enqueued, make sure: - Job is removed from the sorted set of scheduled jobs - "enqueued_at" attribute is properly set - Job appears in the right queue - Queue is recognized by rq's Queue.all() """ now = datetime.utcnow() queue_name = 'foo' scheduler = Scheduler(connection=self.testconn, queue_name=queue_name) job = scheduler.enqueue_at(now, say_hello) self.scheduler.enqueue_job(job) self.assertNotIn( job, tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10))) job = Job.fetch(job.id, connection=self.testconn) self.assertTrue(job.enqueued_at is not None) queue = scheduler.get_queue_for_job(job) self.assertIn(job, queue.jobs) queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name)) self.assertIn(job, queue.jobs) self.assertIn(queue, Queue.all()) def test_enqueue_job_with_scheduler_queue(self): """ Ensure that job is enqueued correctly when the scheduler is bound to a queue object and job queue name is not provided. 
""" queue = Queue('foo', connection=self.testconn) scheduler = Scheduler(connection=self.testconn, queue=queue) job = scheduler._create_job(say_hello) scheduler_queue = scheduler.get_queue_for_job(job) self.assertEqual(queue, scheduler_queue) scheduler.enqueue_job(job) self.assertTrue(job.enqueued_at is not None) self.assertIn(job, queue.jobs) self.assertIn(queue, Queue.all()) def test_enqueue_job_with_job_queue_name(self): """ Ensure that job is enqueued correctly when queue_name is provided at job creation """ queue = Queue('foo', connection=self.testconn) job_queue = Queue('job_foo', connection=self.testconn) scheduler = Scheduler(connection=self.testconn, queue=queue) job = scheduler._create_job(say_hello, queue_name='job_foo') self.assertEqual(scheduler.get_queue_for_job(job), job_queue) scheduler.enqueue_job(job) self.assertTrue(job.enqueued_at is not None) self.assertIn(job, job_queue.jobs) self.assertIn(job_queue, Queue.all()) def test_enqueue_at_with_job_queue_name(self): """ Ensure that job is enqueued correctly when queue_name is provided to enqueue_at """ queue = Queue('foo', connection=self.testconn) job_queue = Queue('job_foo', connection=self.testconn) scheduler = Scheduler(connection=self.testconn, queue=queue) job = scheduler.enqueue_at(datetime.utcnow(), say_hello, queue_name='job_foo') self.assertEqual(scheduler.get_queue_for_job(job), job_queue) self.scheduler.enqueue_job(job) self.assertTrue(job.enqueued_at is not None) self.assertIn(job, job_queue.jobs) self.assertIn(job_queue, Queue.all()) def test_job_membership(self): now = datetime.utcnow() job = self.scheduler.enqueue_at(now, say_hello) self.assertIn(job, self.scheduler) self.assertIn(job.id, self.scheduler) self.assertNotIn("non-existing-job-id", self.scheduler) def test_cancel_scheduled_job(self): """ When scheduled job is canceled, make sure: - Job is removed from the sorted set of scheduled jobs """ # schedule a job to be enqueued one minute from now time_delta = timedelta(minutes=1) 
job = self.scheduler.enqueue_in(time_delta, say_hello) # cancel the scheduled job and check that it's gone from the set self.scheduler.cancel(job) self.assertNotIn( job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))) def test_change_execution_time(self): """ Ensure ``change_execution_time`` is called, ensure that job's score is updated """ job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello) new_date = datetime(2010, 1, 1) self.scheduler.change_execution_time(job, new_date) self.assertEqual( to_unix(new_date), self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)) self.scheduler.cancel(job) self.assertRaises(ValueError, self.scheduler.change_execution_time, job, new_date) def test_args_kwargs_are_passed_correctly(self): """ Ensure that arguments and keyword arguments are properly saved to jobs. """ job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, 1, 1) self.assertEqual(job.args, (1, 1, 1)) job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, z=1, y=1, x=1) self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1}) job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, z=1, y=1) self.assertEqual(job.kwargs, {'y': 1, 'z': 1}) self.assertEqual(job.args, (1, )) time_delta = timedelta(minutes=1) job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, 1, 1) self.assertEqual(job.args, (1, 1, 1)) job = self.scheduler.enqueue_in(time_delta, simple_addition, z=1, y=1, x=1) self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1}) job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, z=1, y=1) self.assertEqual(job.kwargs, {'y': 1, 'z': 1}) self.assertEqual(job.args, (1, )) def test_interval_and_repeat_persisted_correctly(self): """ Ensure that interval and repeat attributes are correctly saved. 
""" job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=10, repeat=11) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(job_from_queue.meta['interval'], 10) self.assertEqual(job_from_queue.meta['repeat'], 11) def test_crontab_persisted_correctly(self): """ Ensure that crontab attribute gets correctly saved in Redis. """ # create a job that runs one minute past each whole hour job = self.scheduler.cron("1 * * * *", say_hello) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(job_from_queue.meta['cron_string'], "1 * * * *") # get the scheduled_time and convert it to a datetime object unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id) datetime_time = from_unix(unix_time) # check that minute=1, seconds=0, and is within an hour assert datetime_time.minute == 1 assert datetime_time.second == 0 assert datetime_time - datetime.utcnow() < timedelta(hours=1) def test_crontab_persisted_correctly_with_local_timezone(self): """ Ensure that crontab attribute gets correctly saved in Redis when using local TZ. 
""" # create a job that runs one minute past each whole hour job = self.scheduler.cron("0 15 * * *", say_hello, use_local_timezone=True) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(job_from_queue.meta['cron_string'], "0 15 * * *") # get the scheduled_time and convert it to a datetime object unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id) datetime_time = from_unix(unix_time) expected_datetime_in_local_tz = datetime.now( get_utc_timezone()).replace(hour=15, minute=0, second=0, microsecond=0) assert datetime_time.time( ) == expected_datetime_in_local_tz.astimezone( get_utc_timezone()).time() def test_crontab_rescheduled_correctly_with_local_timezone(self): # Create a job with a cronjob_string job = self.scheduler.cron("1 15 * * *", say_hello, use_local_timezone=True) # change crontab job.meta['cron_string'] = "2 15 * * *" # reenqueue the job self.scheduler.enqueue_job(job) # get the scheduled_time and convert it to a datetime object unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id) datetime_time = from_unix(unix_time) expected_datetime_in_local_tz = datetime.now( get_utc_timezone()).replace(hour=15, minute=2, second=0, microsecond=0) assert datetime_time.time( ) == expected_datetime_in_local_tz.astimezone( get_utc_timezone()).time() def test_crontab_sets_timeout(self): """ Ensure that a job scheduled via crontab can be created with a custom timeout. 
""" timeout = 13 job = self.scheduler.cron("1 * * * *", say_hello, timeout=timeout) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(job_from_queue.timeout, timeout) def test_crontab_sets_id(self): """ Ensure that a job scheduled via crontab can be created with a custom id """ job_id = "hello-job-id" job = self.scheduler.cron("1 * * * *", say_hello, id=job_id) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(job_id, job_from_queue.id) def test_crontab_sets_default_result_ttl(self): """ Ensure that a job scheduled via crontab gets proper default result_ttl (-1) periodic tasks. """ job = self.scheduler.cron("1 * * * *", say_hello) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(-1, job_from_queue.result_ttl) def test_crontab_sets_description(self): """ Ensure that a job scheduled via crontab can be created with a custom description """ description = 'test description' job = self.scheduler.cron("1 * * * *", say_hello, description=description) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(description, job_from_queue.description) def test_repeat_without_interval_raises_error(self): # Ensure that an error is raised if repeat is specified without interval def create_job(): self.scheduler.schedule(datetime.utcnow(), say_hello, repeat=11) self.assertRaises(ValueError, create_job) def test_job_with_intervals_get_rescheduled(self): """ Ensure jobs with interval attribute are put back in the scheduler """ time_now = datetime.utcnow() interval = 10 job = self.scheduler.schedule(time_now, say_hello, interval=interval) self.scheduler.enqueue_job(job) self.assertIn( job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))) self.assertEqual( self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id), to_unix(time_now) + interval) def test_job_with_interval_can_set_meta(self): """ Ensure that jobs with interval attribute can be created with 
meta """ time_now = datetime.utcnow() interval = 10 meta = {'say': 'hello'} job = self.scheduler.schedule(time_now, say_hello, interval=interval, meta=meta) self.scheduler.enqueue_job(job) self.assertEqual(job.meta, meta) def test_job_with_crontab_get_rescheduled(self): # Create a job with a cronjob_string job = self.scheduler.cron("1 * * * *", say_hello) # current unix_time old_next_scheduled_time = self.testconn.zscore( self.scheduler.scheduled_jobs_key, job.id) # change crontab job.meta['cron_string'] = "2 * * * *" # enqueue the job self.scheduler.enqueue_job(job) self.assertIn( job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))) # check that next scheduled time has changed self.assertNotEqual( old_next_scheduled_time, self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)) # check that new next scheduled time is set correctly expected_next_scheduled_time = to_unix( get_next_scheduled_time("2 * * * *")) self.assertEqual( self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id), expected_next_scheduled_time) def test_job_with_repeat(self): """ Ensure jobs with repeat attribute are put back in the scheduler X (repeat) number of times """ time_now = datetime.utcnow() interval = 10 # If job is repeated once, the job shouldn't be put back in the queue job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=1) self.scheduler.enqueue_job(job) self.assertNotIn( job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))) # If job is repeated twice, it should only be put back in the queue once job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=2) self.scheduler.enqueue_job(job) self.assertIn( job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))) self.scheduler.enqueue_job(job) self.assertNotIn( job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))) def test_missing_jobs_removed_from_scheduler(self): """ Ensure jobs that don't exist 
when queued are removed from the scheduler. """ job = self.scheduler.schedule(datetime.utcnow(), say_hello) job.cancel() list(self.scheduler.get_jobs_to_queue()) self.assertIn( job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))) job.delete() list(self.scheduler.get_jobs_to_queue()) self.assertNotIn( job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))) def test_periodic_jobs_sets_result_ttl(self): """ Ensure periodic jobs set result_ttl to infinite. """ job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(job.result_ttl, -1) def test_periodic_jobs_sets_ttl(self): """ Ensure periodic jobs sets correctly ttl. """ job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, ttl=4) job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual(job.ttl, 4) def test_periodic_jobs_sets_meta(self): """ Ensure periodic jobs sets correctly meta. """ meta = {'say': 'hello'} job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, meta=meta) self.assertEqual(meta, job.meta) def test_periodic_job_sets_id(self): """ Ensure that ID is passed to RQ by schedule. """ job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, id='id test') job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual('id test', job.id) def test_periodic_job_sets_description(self): """ Ensure that description is passed to RQ by schedule. """ job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, description='description') job_from_queue = Job.fetch(job.id, connection=self.testconn) self.assertEqual('description', job.description) def test_run(self): """ Check correct signal handling in Scheduler.run(). """ def send_stop_signal(): """ Sleep for 1 second, then send a INT signal to ourself, so the signal handler installed by scheduler.run() is called. 
""" time.sleep(1) os.kill(os.getpid(), signal.SIGINT) thread = Thread(target=send_stop_signal) thread.start() self.assertRaises(SystemExit, self.scheduler.run) thread.join() def test_run_burst(self): """ Check burst mode of Scheduler.run(). """ now = datetime.utcnow() job = self.scheduler.enqueue_at(now, say_hello) self.assertIn(job, self.scheduler.get_jobs_to_queue()) self.assertEqual(len(list(self.scheduler.get_jobs())), 1) self.scheduler.run(burst=True) self.assertEqual(len(list(self.scheduler.get_jobs())), 0) def test_scheduler_w_o_explicit_connection(self): """ Ensure instantiating Scheduler w/o explicit connection works. """ s = Scheduler() self.assertEqual(s.connection, self.testconn) def test_small_float_interval(self): """ Test that scheduler accepts 'interval' of type float, less than 1 second. """ key = Scheduler.scheduler_key lock_key = '%s_lock' % Scheduler.scheduler_key self.assertNotIn(key, tl(self.testconn.keys('*'))) scheduler = Scheduler(connection=self.testconn, interval=0.1) # testing interval = 0.1 second self.assertEqual(scheduler._interval, 0.1) #acquire lock self.assertTrue(scheduler.acquire_lock()) self.assertIn(lock_key, tl(self.testconn.keys('*'))) self.assertEqual(self.testconn.ttl(lock_key), 10) # int(0.1) + 10 = 10 #enqueue a job now = datetime.utcnow() job = scheduler.enqueue_at(now, say_hello) self.assertIn(job, self.scheduler.get_jobs_to_queue()) self.assertEqual(len(list(self.scheduler.get_jobs())), 1) #remove the lock scheduler.remove_lock() #test that run works with the small floating-point interval def send_stop_signal(): """ Sleep for 1 second, then send a INT signal to ourself, so the signal handler installed by scheduler.run() is called. 
""" time.sleep(1) os.kill(os.getpid(), signal.SIGINT) thread = Thread(target=send_stop_signal) thread.start() self.assertRaises(SystemExit, scheduler.run) thread.join() #all jobs must have been scheduled during 1 second self.assertEqual(len(list(scheduler.get_jobs())), 0) def test_get_queue_for_job_with_job_queue_name(self): """ Tests that scheduler gets the correct queue for the job when queue_name is provided. """ queue = Queue('scheduler_foo', connection=self.testconn) job_queue = Queue('job_foo', connection=self.testconn) scheduler = Scheduler(connection=self.testconn, queue=queue) job = scheduler._create_job(say_hello, queue_name='job_foo') self.assertEqual(scheduler.get_queue_for_job(job), job_queue) def test_get_queue_for_job_without_job_queue_name(self): """ Tests that scheduler gets the scheduler queue for the job when queue name is not provided for that job. """ queue = Queue('scheduler_foo', connection=self.testconn) scheduler = Scheduler(connection=self.testconn, queue=queue) job = scheduler._create_job(say_hello) self.assertEqual(scheduler.get_queue_for_job(job), queue)
def entries(org_label, project_label, entry_id):
    ''' show a specific entry mostly for editing purposes

    Routing behavior (NOTE: legacy Python 2 / Flask / MongoEngine code):
    - no entry_id + POST: download entries as an XLS file (with filters)
    - no entry_id + GET: paginated listing (unique / duplicate / hidden)
    - entry_id + GET: show one entry (?edit=true for the edit form)
    - entry_id + POST: apply edits ("info") or toggle visibility
      ("hide_entry"), keeping the duplicate/hidden bookkeeping consistent
    '''
    # session['email'] is assumed to belong to an existing user (auth
    # enforced upstream -- TODO confirm)
    user = User.objects(email=session['email'])[0]

    # find the org
    orgs = Organization.objects(label=org_label)
    if not orgs:
        flash('Organization "%s" not found, sorry!' % org_label, 'warning')
        return redirect(url_for('organizations'))
    org = orgs[0]

    # permission-check
    if org not in user.organizations and not user.admin_rights:
        app.logger.error('%s tried to view a project but was \
            denied for want of admin rights' % session['email'])
        abort(404)

    # find the project
    projects = Project.objects(label=project_label, organization=org)
    if not projects:
        flash('Project "%s" not found, sorry!' % project_label, 'warning')
        return redirect(url_for('organizations', org_label=org.label))
    project = projects[0]

    if not entry_id:
        if request.method == 'POST':
            # downloading entries with applied filters
            filter_labels = request.form.getlist('filters')
            apply_any_filters = request.form.get('apply_any_filters', '')
            # coerce the checkbox value to a real boolean
            if apply_any_filters == 'true':
                apply_any_filters = True
            else:
                apply_any_filters = False

            # make a list of filter objects
            filters = []
            for filter_label in filter_labels:
                filters.append(Filter.objects(label=filter_label
                    , project=project)[0])

            # serve up a file of the project entries
            absolute_filename = utilities.download_all_entries(project
                , filters, format='xls', apply_any_filters=apply_any_filters)

            # delay the deletion so we have time to serve the file
            redis_config = app.config['REDIS_CONFIG']
            use_connection(Redis(redis_config['host'], redis_config['port']
                , password=redis_config['password']))
            scheduler = Scheduler()
            # delete the temp file ~60s from now, after send_file finishes
            scheduler.enqueue_in(datetime.timedelta(seconds=60)
                , delete_local_file, absolute_filename)

            return send_file(absolute_filename, as_attachment=True)

        '''GET request..
        display entries for the project '''
        # pre-compute counts for each category so the templates can show tabs
        duplicate_entries_count = Entry.objects(project=project
            , unique=False).count()
        hidden_entries_count = Entry.objects(project=project, unique=True
            , visible=False).count()
        unique_entries_count = Entry.objects(project=project, unique=True
            , visible=True).count()

        entry_type = request.args.get('type', '')
        if entry_type == 'duplicate':
            # show unique=False entries
            count = duplicate_entries_count
            unique = False
            visible = None
            template = 'project_duplicate_entries.html'
        elif entry_type == 'hidden':
            # show visible=False, unique=True entries
            count = hidden_entries_count
            unique = True
            visible = False
            template = 'project_hidden_entries.html'
        else:
            # show uniques
            count = unique_entries_count
            unique = True
            visible = True
            template = 'project_entries.html'

        entries_per_page = 10
        pages = utilities.calculate_pages(count
            , entries_per_page=entries_per_page)

        # validate the requested page
        current_page = utilities.validate_page_request(
            request.args.get('page', 1), pages)

        # fetch just the current page's worth of entries
        entries = utilities.query_entries(project
            , unique=unique
            , visible=visible
            , skip=(entries_per_page * (current_page - 1))
            , limit=entries_per_page)

        # present some filters if data is downloaded
        available_filters = Filter.objects(project=project)

        return render_template(template
            , project=project
            , entries=entries
            , unique_entries_count=unique_entries_count
            , duplicate_entries_count=duplicate_entries_count
            , hidden_entries_count=hidden_entries_count
            , available_filters=available_filters
            , current_page = current_page
            , number_of_pages = pages)

    # we have an entry_id, try to find the object
    entries = Entry.objects(id=entry_id)
    if not entries:
        flash('Entry "%s" not found, sorry!'
            % entry_id, 'warning')
        return redirect(url_for('entries', org_label=org.label
            , project_label=project.label))
    entry = entries[0]

    if request.method == 'GET':
        if request.args.get('edit', '') == 'true':
            return render_template('entry_edit.html', entry=entry)
        else:
            return render_template('entry.html', entry=entry)

    elif request.method == 'POST':
        form_type = request.form.get('form_type', '')
        if form_type == 'info':
            # track all modifications to this entry
            modifications = []

            # don't think I can set just one value in the dict with set__
            # so let's make a copy then alter it, then update it
            values = dict(entry.values)

            # walk the project schema and pick up each edited field from the
            # form, coercing by the header's declared data_type
            for header in entry.project.ordered_schema:
                if header.data_type == 'datetime':
                    # date and time are posted as two separate form fields
                    full_dt = '%s %s' % (
                        request.form.get('%s__date' % header.id, '')
                        , request.form.get('%s__time' % header.id, ''))
                    try:
                        struct = time.strptime(full_dt, '%B %d, %Y %I:%M %p')
                        edited_val = datetime.datetime.fromtimestamp(
                            time.mktime(struct))
                    except ValueError:
                        # requested change was improperly formatted
                        message = ('Error. Date-time data expected for the'
                            ' field "%s."' % header.label)
                        return redirect_to_editing(entry, 'error', message)
                elif header.data_type == 'number':
                    try:
                        edited_val = float(request.form.get(
                            str(header.id), ''))
                    except ValueError:
                        # requested change wasn't a number
                        message = ('Error. Numerical data expected for the'
                            ' field "%s."' % header.label)
                        return redirect_to_editing(entry, 'error', message)
                else:
                    edited_val = request.form.get(str(header.id), '')

                # values that were originally None will show up here as ''
                if edited_val == '' and entry.values[header.name] == None:
                    continue
                if edited_val != entry.values[header.name]:
                    values[header.name] = edited_val
                    modifications.append('updated "%s" from "%s" to "%s"'
                        % (header.label, entry.values[header.name]
                        , edited_val))

            if modifications:
                ''' update the entry '''
                entry.update(set__values = values)
                entry.update(set__was_never_edited = False)

                ''' generate some hashes used to check other possible
                duplicative and hidden entries '''
                old_hash = str(entry.value_hash)

                # compute a hash for the new values
                # NOTE(review): Python 2 idiom -- keys() returns a list and
                # str()/unicode handling below relies on Py2 semantics
                m = hashlib.md5()
                #sorted_headers = [h.name for h in project.ordered_schema]
                sorted_headers = values.keys()
                sorted_headers.sort()
                for header in sorted_headers:
                    value = values[header]
                    if type(value) == type(u'unicode'):
                        m.update(value.encode('utf8'))
                    else:
                        m.update(str(value))
                new_hash = m.hexdigest()

                # if the old entry was unique and had a dupe..
                # ..that dupe is now unique
                # limit one to flip just one of several possible dupes
                if entry.unique:
                    old_dupes = Entry.objects(project=project, unique=False
                        , value_hash=old_hash).limit(1)
                    if old_dupes:
                        flash('The entry you have edited had a duplicate in'
                            ' the system. That duplicate has now been marked'
                            ' "unique."', 'success')
                        old_dupes[0].update(set__unique = True)

                # process hidden entries
                # if there are hidden entries with these values..
                # ..sound the alarm
                hidden_entries = Entry.objects(project=project
                    , visible=False).only('value_hash')
                hidden_hashes = [h['value_hash'] for h in hidden_entries]
                if entry.visible and new_hash in hidden_hashes:
                    flash('Warning: an entry with these values was previously'
                        ' "hidden," i.e. removed from analysis. Consider'
                        ' hiding this entry.', 'warning')

                # if the entry was hidden, remind the user of that fact..
                # since the values were just edited they may want to change it
                if not entry.visible:
                    flash('This entry is currently "hidden" and not included'
                        ' in analysis. Consider un-hiding it to include your'
                        ' new edits.', 'warning')

                # search for unique and duplicate values
                uniques = Entry.objects(project=project
                    , unique=True).only('value_hash')
                unique_hashes = [u['value_hash'] for u in uniques]

                # if entry /was/ unique
                if entry.unique:
                    if new_hash in unique_hashes:
                        flash('This entry is now a duplicate of another entry'
                            ' in the system.', 'warning')
                        entry.update(set__unique=False)
                # entry /wasn't/ unique
                else:
                    if new_hash not in unique_hashes:
                        flash('This entry was formerly a duplicate but is now'
                            ' unique.', 'success')
                        entry.update(set__unique=True)

                entry.update(set__value_hash = new_hash)

                ''' create a comment encapsulating the changes '''
                new_comment = Comment(
                    body = '; '.join(modifications)
                    , creation_time = datetime.datetime.utcnow()
                    , editable = False
                    , entry = entry
                    , owner = user
                    , project = project
                )
                new_comment.save()

                message = ('Changes saved successfully: %s'
                    % '; '.join(modifications))
                return redirect_to_editing(entry, 'success', message)
            else:
                # no modifications made to the entry
                return redirect_to_editing(entry, None, None)

        elif form_type == 'hide_entry':
            # flip the 'visible' state of this entry
            if entry.visible:
                entry.update(set__visible = False)
                modifications = 'entry removed from analysis'
            else:
                entry.update(set__visible = True)
                modifications = 'entry re-included in analysis'

            # also find all duplicates in the project and hide/unhide them as well
            duplicate_entries = Entry.objects(project=project
                , value_hash=entry.value_hash)
            # if we have more than the original entry
            if len(duplicate_entries) > 1:
                for duplicate_entry in duplicate_entries:
                    if duplicate_entry.id == entry.id:
                        continue
                    # reload to pick up the visibility flip applied above
                    entry.reload()
                    if entry.visible:
                        duplicate_entry.update(set__visible = True)
                    else:
                        duplicate_entry.update(set__visible = False)

                # append a note about the dupes
                # plural..
                dupes = len(duplicate_entries) - 1
                if dupes > 1:
                    plural = 's'
                else:
                    plural = ''
                modifications += ' with %s duplicate%s' % (dupes, plural)

            ''' create a comment encapsulating the changes
            hm, comments won't be attached to duplicates..alas '''
            new_comment = Comment(
                body = modifications
                , creation_time = datetime.datetime.utcnow()
                , editable = False
                , entry = entry
                , owner = user
                , project = project
            )
            new_comment.save()

            message = 'Changes saved successfully: %s.' % modifications
            return redirect_to_editing(entry, 'success', message)
class TestScheduler(RQTestCase):
    """Tests for rq-scheduler's Scheduler against a live test Redis
    connection (self.testconn, provided by RQTestCase)."""

    def setUp(self):
        super(TestScheduler, self).setUp()
        self.scheduler = Scheduler(connection=self.testconn)

    def test_birth_and_death_registration(self):
        """
        When scheduler registers it's birth, besides creating a key, it should
        also set an expiry that's a few seconds longer than it's polling
        interval so it automatically expires if scheduler is unexpectedly
        terminated.
        """
        key = Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=20)
        scheduler.register_birth()
        self.assertIn(key, tl(self.testconn.keys('*')))
        # TTL = interval (20) + grace period (10)
        self.assertEqual(self.testconn.ttl(key), 30)
        self.assertFalse(self.testconn.hexists(key, 'death'))
        # a second birth registration while alive must fail
        self.assertRaises(ValueError, scheduler.register_birth)
        scheduler.register_death()
        self.assertTrue(self.testconn.hexists(key, 'death'))

    def test_create_job(self):
        """
        Ensure that jobs are created properly.
        """
        job = self.scheduler._create_job(say_hello, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job, job_from_queue)
        self.assertEqual(job_from_queue.func, say_hello)

    def test_create_job_with_ttl(self):
        """
        Ensure that TTL is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, ttl=2, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(2, job_from_queue.ttl)

    def test_create_job_with_id(self):
        """
        Ensure that ID is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, id='id test', args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('id test', job_from_queue.id)

    def test_create_job_with_description(self):
        """
        Ensure that description is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, description='description', args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('description', job_from_queue.description)

    def test_job_not_persisted_if_commit_false(self):
        """
        Ensure jobs are only saved to Redis if commit=True.
        """
        job = self.scheduler._create_job(say_hello, commit=False)
        self.assertEqual(self.testconn.hgetall(job.key), {})

    def test_create_scheduled_job(self):
        """
        Ensure that scheduled jobs are put in the scheduler queue with the right score
        """
        scheduled_time = datetime.utcnow()
        job = self.scheduler.enqueue_at(scheduled_time, say_hello)
        self.assertEqual(job, Job.fetch(job.id, connection=self.testconn))
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        # the sorted-set score is the scheduled time as a unix timestamp
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(scheduled_time))

    def test_enqueue_in(self):
        """
        Ensure that jobs have the right scheduled time.
        """
        right_now = datetime.utcnow()
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))
        time_delta = timedelta(hours=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))

    def test_get_jobs(self):
        """
        Ensure get_jobs() returns all jobs until the specified time.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs(now))
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        # get_jobs accepts either a datetime or a timedelta cutoff
        self.assertIn(job, self.scheduler.get_jobs(timedelta(hours=1, seconds=1)))
        self.assertIn(job, [j[0] for j in self.scheduler.get_jobs(with_times=True)])
        self.assertIsInstance(self.scheduler.get_jobs(with_times=True)[0][1], datetime)
        self.assertNotIn(job, self.scheduler.get_jobs(timedelta(minutes=59, seconds=59)))

    def test_get_jobs_to_queue(self):
        """
        Ensure that jobs scheduled the future are not queued.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertNotIn(job, self.scheduler.get_jobs_to_queue())

    def test_enqueue_job(self):
        """
        When scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        - Queue is recognized by rq's Queue.all()
        """
        now = datetime.utcnow()
        queue_name = 'foo'
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)

        job = scheduler.enqueue_at(now, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job, tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10)))
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)
        self.assertIn(queue, Queue.all())

    def test_job_membership(self):
        # `in` works with both Job objects and raw job ids
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler)
        self.assertIn(job.id, self.scheduler)
        self.assertNotIn("non-existing-job-id", self.scheduler)

    def test_cancel_scheduled_job(self):
        """
        When scheduled job is canceled, make sure:
        - Job is removed from the sorted set of scheduled jobs
        """
        # schedule a job to be enqueued one minute from now
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        # cancel the scheduled job and check that it's gone from the set
        self.scheduler.cancel(job)
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_change_execution_time(self):
        """
        Ensure ``change_execution_time`` is called, ensure that job's score is updated
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
        new_date = datetime(2010, 1, 1)
        self.scheduler.change_execution_time(job, new_date)
        self.assertEqual(to_unix(new_date),
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))
        self.scheduler.cancel(job)
        # changing the time of a canceled job must raise
        self.assertRaises(ValueError, self.scheduler.change_execution_time, job, new_date)

    def test_args_kwargs_are_passed_correctly(self):
        """
        Ensure that arguments and keyword arguments are properly saved to jobs.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_in(time_delta, simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

    def test_enqueue_is_deprecated(self):
        """
        Ensure .enqueue() throws a DeprecationWarning
        """
        with warnings.catch_warnings(record=True) as w:
            # Enable all warnings
            warnings.simplefilter("always")
            job = self.scheduler.enqueue(datetime.utcnow(), say_hello)
            self.assertEqual(1, len(w))
            self.assertEqual(w[0].category, DeprecationWarning)

    def test_enqueue_periodic(self):
        """
        Ensure .enqueue_periodic() throws a DeprecationWarning
        """
        with warnings.catch_warnings(record=True) as w:
            # Enable all warnings
            warnings.simplefilter("always")
            job = self.scheduler.enqueue_periodic(datetime.utcnow(), 1, None, say_hello)
            self.assertEqual(1, len(w))
            self.assertEqual(w[0].category, DeprecationWarning)

    def test_interval_and_repeat_persisted_correctly(self):
        """
        Ensure that interval and repeat attributes get correctly saved in Redis.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=10, repeat=11)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['interval'], 10)
        self.assertEqual(job_from_queue.meta['repeat'], 11)

    def test_crontab_persisted_correctly(self):
        """
        Ensure that crontab attribute gets correctly saved in Redis.
        """
        # create a job that runs one minute past each whole hour
        job = self.scheduler.cron("1 * * * *", say_hello)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['cron_string'], "1 * * * *")

        # get the scheduled_time and convert it to a datetime object
        unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)
        datetime_time = from_unix(unix_time)

        # check that minute=1, seconds=0, and is within an hour
        assert datetime_time.minute == 1
        assert datetime_time.second == 0
        assert datetime_time - datetime.utcnow() < timedelta(hours=1)

    def test_crontab_sets_timeout(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom timeout.
        """
        timeout = 13
        job = self.scheduler.cron("1 * * * *", say_hello, timeout=timeout)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.timeout, timeout)

    def test_crontab_sets_id(self):
        """
        Ensure that a job scheduled via crontab can be created with a custom id
        """
        job_id = "hello-job-id"
        job = self.scheduler.cron("1 * * * *", say_hello, id=job_id)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_id, job_from_queue.id)

    def test_crontab_sets_default_result_ttl(self):
        """
        Ensure that a job scheduled via crontab gets proper default
        result_ttl (-1) periodic tasks.
        """
        job = self.scheduler.cron("1 * * * *", say_hello)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(-1, job_from_queue.result_ttl)

    def test_crontab_sets_description(self):
        """
        Ensure that a job scheduled via crontab can be created with a custom description
        """
        description = 'test description'
        job = self.scheduler.cron("1 * * * *", say_hello, description=description)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(description, job_from_queue.description)

    def test_repeat_without_interval_raises_error(self):
        # Ensure that an error is raised if repeat is specified without interval
        def create_job():
            self.scheduler.schedule(datetime.utcnow(), say_hello, repeat=11)
        self.assertRaises(ValueError, create_job)

    def test_job_with_intervals_get_rescheduled(self):
        """
        Ensure jobs with interval attribute are put back in the scheduler
        """
        time_now = datetime.utcnow()
        interval = 10
        job = self.scheduler.schedule(time_now, say_hello, interval=interval)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        # the score must be pushed forward by exactly `interval` seconds
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(time_now) + interval)

        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, None, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(time_now) + interval)

    def test_job_with_crontab_get_rescheduled(self):
        # Create a job with a cronjob_string
        job = self.scheduler.cron("1 * * * *", say_hello)

        # current unix_time
        old_next_scheduled_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)

        # change crontab
        job.meta['cron_string'] = "2 * * * *"

        # enqueue the job
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # check that next scheduled time has changed
        self.assertNotEqual(old_next_scheduled_time,
                            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))

        # check that new next scheduled time is set correctly
        expected_next_scheduled_time = to_unix(get_next_scheduled_time("2 * * * *"))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         expected_next_scheduled_time)

    def test_job_with_repeat(self):
        """
        Ensure jobs with repeat attribute are put back in the scheduler
        X (repeat) number of times
        """
        time_now = datetime.utcnow()
        interval = 10
        # If job is repeated once, the job shouldn't be put back in the queue
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=1)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=2)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        time_now = datetime.utcnow()
        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, 1, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.enqueue_periodic(time_now, interval, 2, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id, tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_missing_jobs_removed_from_scheduler(self):
        """
        Ensure jobs that don't exist when queued are removed from the scheduler.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello)
        job.cancel()
        # a sweep of due jobs drops ids whose job hash no longer exists
        self.scheduler.get_jobs_to_queue()
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_periodic_jobs_sets_result_ttl(self):
        """
        Ensure periodic jobs set result_ttl to infinite.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5)
        # NOTE(review): job_from_queue is fetched but the assertion checks the
        # local `job` object
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.result_ttl, -1)

    def test_periodic_jobs_sets_ttl(self):
        """
        Ensure periodic jobs sets correctly ttl.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, ttl=4)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.ttl, 4)

    def test_periodic_job_sets_id(self):
        """
        Ensure that ID is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, id='id test')
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('id test', job.id)

    def test_periodic_job_sets_description(self):
        """
        Ensure that description is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, description='description')
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('description', job.description)

    def test_run(self):
        """
        Check correct signal handling in Scheduler.run().
        """
        def send_stop_signal():
            """
            Sleep for 1 second, then send a INT signal to ourself, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)
        thread = Thread(target=send_stop_signal)
        thread.start()
        # run() installs a SIGINT handler that raises SystemExit
        self.assertRaises(SystemExit, self.scheduler.run)
        thread.join()

    def test_scheduler_w_o_explicit_connection(self):
        """
        Ensure instantiating Scheduler w/o explicit connection works.
        """
        s = Scheduler()
        self.assertEqual(s.connection, self.testconn)

    def test_small_float_interval(self):
        """
        Test that scheduler accepts 'interval' of type float, less than 1 second.
        """
        key = Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=0.1)   # testing interval = 0.1 second
        self.assertEqual(scheduler._interval, 0.1)

        #register birth
        scheduler.register_birth()
        self.assertIn(key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(key), 10)  # int(0.1) + 10 = 10
        self.assertFalse(self.testconn.hexists(key, 'death'))

        #enqueue a job
        now = datetime.utcnow()
        job = scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        self.assertEqual(len(self.scheduler.get_jobs()), 1)

        #register death
        scheduler.register_death()

        #test that run works with the small floating-point interval
        def send_stop_signal():
            """
            Sleep for 1 second, then send a INT signal to ourself, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)
        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, scheduler.run)
        thread.join()

        #all jobs must have been scheduled during 1 second
        self.assertEqual(len(scheduler.get_jobs()), 0)
class TestScheduler(RQTestCase):
    """Tests for the rq-scheduler ``Scheduler`` against a test Redis connection."""

    def setUp(self):
        super(TestScheduler, self).setUp()
        self.scheduler = Scheduler(connection=self.testconn)

    def test_birth_and_death_registration(self):
        key = Scheduler.scheduler_key
        self.assertNotIn(key, self.testconn.keys('*'))
        scheduler = Scheduler(connection=self.testconn)
        scheduler.register_birth()
        self.assertIn(key, self.testconn.keys('*'))
        self.assertFalse(self.testconn.hexists(key, 'death'))
        # a second birth registration must be rejected
        self.assertRaises(ValueError, scheduler.register_birth)
        scheduler.register_death()
        self.assertTrue(self.testconn.hexists(key, 'death'))

    def test_create_job(self):
        """
        Ensure that jobs are created properly.
        """
        job = self.scheduler._create_job(say_hello, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job, job_from_queue)
        self.assertEqual(job_from_queue.func, say_hello)

    def test_job_not_persisted_if_commit_false(self):
        """
        Ensure jobs are only saved to Redis if commit=True.
        """
        job = self.scheduler._create_job(say_hello, commit=False)
        self.assertEqual(self.testconn.hgetall(job.key), {})

    def test_create_scheduled_job(self):
        """
        Ensure that scheduled jobs are put in the scheduler queue with the right score
        """
        scheduled_time = datetime.now()
        job = self.scheduler.enqueue_at(scheduled_time, say_hello)
        self.assertEqual(job, Job.fetch(job.id, connection=self.testconn))
        self.assertIn(job.id,
                      self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         int(scheduled_time.strftime('%s')))

    def test_enqueue_in(self):
        """
        Ensure that jobs have the right scheduled time.
        """
        right_now = datetime.now()
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertIn(job.id,
                      self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         int((right_now + time_delta).strftime('%s')))
        time_delta = timedelta(hours=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         int((right_now + time_delta).strftime('%s')))

    def test_get_jobs_to_queue(self):
        """
        Ensure that jobs scheduled the future are not queued.
        """
        now = datetime.now()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertNotIn(job, self.scheduler.get_jobs_to_queue())

    def test_enqueue_job(self):
        """
        When scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        """
        now = datetime.now()
        queue_name = 'foo'
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)
        job = scheduler.enqueue_at(now, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job,
                         self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10))
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)

    def test_cancel_scheduled_job(self):
        """
        When scheduled job is canceled, make sure:
        - Job is removed from the sorted set of scheduled jobs
        """
        # schedule a job to be enqueued one minute from now
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        # cancel the scheduled job and check that it's gone from the set
        self.scheduler.cancel(job)
        self.assertNotIn(job.id, self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1))

    def test_change_execution_time(self):
        """
        Ensure ``change_execution_time`` is called, ensure that job's score is updated
        """
        job = self.scheduler.enqueue_at(datetime.now(), say_hello)
        new_date = datetime(2010, 1, 1)
        self.scheduler.change_execution_time(job, new_date)
        self.assertEqual(int(new_date.strftime('%s')),
                         self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))

    def test_args_kwargs_are_passed_correctly(self):
        """
        Ensure that arguments and keyword arguments are properly saved to jobs.
        """
        job = self.scheduler.enqueue_at(datetime.now(), simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_at(datetime.now(), simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_at(datetime.now(), simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_in(time_delta, simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

    def test_interval_and_repeat_persisted_correctly(self):
        """
        Ensure that interval and repeat attributes get correctly saved in Redis.
        """
        job = self.scheduler.enqueue(datetime.now(), say_hello, interval=10, repeat=11)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(int(job_from_queue.interval), 10)
        self.assertEqual(int(job_from_queue.repeat), 11)

    def test_repeat_without_interval_raises_error(self):
        # Ensure that an error is raised if repeat is specified without interval
        def create_job():
            self.scheduler.enqueue(datetime.now(), say_hello, repeat=11)
        self.assertRaises(ValueError, create_job)

    def test_job_with_intervals_get_rescheduled(self):
        """
        Ensure jobs with interval attribute are put back in the scheduler
        """
        time_now = datetime.now()
        interval = 10
        job = self.scheduler.enqueue(time_now, say_hello, interval=interval)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
                      self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         int(time_now.strftime('%s')) + interval)

        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, None, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
                      self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         int(time_now.strftime('%s')) + interval)

    def test_job_with_repeat(self):
        """
        Ensure jobs with repeat attribute are put back in the scheduler
        X (repeat) number of times
        """
        time_now = datetime.now()
        interval = 10
        # If job is repeated once, the job shouldn't be put back in the queue
        job = self.scheduler.enqueue(time_now, say_hello, interval=interval, repeat=1)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
                         self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.enqueue(time_now, say_hello, interval=interval, repeat=2)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
                      self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
                         self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))

        time_now = datetime.now()
        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, 1, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
                         self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.enqueue_periodic(time_now, interval, 2, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
                      self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
                         self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))

    def test_missing_jobs_removed_from_scheduler(self):
        """
        Ensure jobs that don't exist when queued are removed from the scheduler.
        """
        job = self.scheduler.enqueue(datetime.now(), say_hello)
        job.cancel()
        self.scheduler.get_jobs_to_queue()
        self.assertNotIn(job.id, self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1))

    def test_periodic_jobs_sets_ttl(self):
        """
        Ensure periodic jobs set result_ttl to infinite.
        """
        job = self.scheduler.enqueue(datetime.now(), say_hello, interval=5)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.result_ttl, -1)
def send_webhook(task_id, job_id, execution_id, method, url, headers, retries, retry_count):
    """Dispatch a webhook for a job execution and record the outcome.

    The execution is serialised to JSON (minus any previous
    ``webhookDispatch`` entries) and POSTed/sent via ``WebhooksDispatcher``.
    Success and failure are both appended to the execution's
    ``webhookDispatch`` metadata. On failure, the call is re-enqueued on the
    "webhooks" queue with exponential backoff until ``retry_count`` reaches
    ``retries``. Returns False when the job cannot be loaded.
    """
    app = current_app
    logger = app.logger.bind(
        operation="send_webhook",
        task_id=task_id,
        job_id=job_id,
        execution_id=execution_id,
        method=method,
        url=url,
        headers=headers,
        retries=retries,
        retry_count=retry_count,
    )

    job = Job.get_by_id(task_id, job_id)
    if job is None:
        logger.error("Failed to retrieve task or job.")
        return False

    execution = job.get_execution_by_id(execution_id)
    logger.info("Execution loaded successfully")

    # JSON round-trip deep-copies the dict so the payload can be mutated
    # without touching the execution object itself.
    payload = json.loads(json.dumps(
        execution.to_dict(include_log=True, include_error=True)))
    if "webhookDispatch" in payload["metadata"]:
        del payload["metadata"]["webhookDispatch"]
    payload["metadata"]["custom"] = job.metadata.get("custom", {})
    body = json.dumps(payload)

    try:
        dispatcher = WebhooksDispatcher()
        response = dispatcher.dispatch(method, url, body, headers)
        execution.metadata.setdefault("webhookDispatch", [])
        execution.metadata["webhookDispatch"].append({
            "timestamp": datetime.utcnow().isoformat(),
            "url": url,
            "statusCode": response.status_code,
            "body": response.body,
            "headers": response.headers,
        })
        job.save()
        logger.info("Webhook dispatched successfully.")
    except WebhooksDispatchError as err:
        error = traceback.format_exc()
        execution.metadata.setdefault("webhookDispatch", [])
        execution.metadata["webhookDispatch"].append({
            "timestamp": datetime.utcnow().isoformat(),
            "url": url,
            "statusCode": err.status_code,
            "body": err.body,
            "headers": err.headers,
            "error": error,
        })
        job.save()
        logger.error("Failed to dispatch webhook.", err=error)

        if retry_count < retries:
            logger.debug("Retrying...")
            next_call_args = [
                task_id,
                job_id,
                execution_id,
                method,
                url,
                headers,
                retries,
                retry_count + 1,
            ]
            scheduler = Scheduler("webhooks", connection=current_app.redis)
            # Exponential backoff: min_backoff * factor ** retry_count seconds.
            factor = app.config["WEBHOOKS_EXPONENTIAL_BACKOFF_FACTOR"]
            min_backoff = app.config[
                "WEBHOOKS_EXPONENTIAL_BACKOFF_MIN_MS"] / 1000.0
            delta = timedelta(seconds=math.pow(factor, retry_count) * min_backoff)
            scheduler.enqueue_in(delta, send_webhook, *next_call_args)
            logger.info("Webhook dispatch retry scheduled.", date=delta)
# coding:utf-8
import time
from redis import Redis
from rq import Queue
from rq_scheduler import Scheduler
from datetime import datetime, timedelta

# Get a scheduler bound to the "Log" queue.
scheduler = Scheduler(connection=Redis(), queue_name='Log')


def hello():
    """Print the current timestamp and a counter ten times (demo job)."""
    for i in range(10):
        # BUGFIX: was the Python 2-only statement `print time.time(), i`,
        # which is a SyntaxError on Python 3.
        print(time.time(), i)


# Schedule the demo job to run one minute from now.
scheduler.enqueue_in(timedelta(minutes=1), hello)
class TestScheduler(RQTestCase):
    """Tests for the rq-scheduler ``Scheduler`` against a test Redis connection."""

    def setUp(self):
        super(TestScheduler, self).setUp()
        self.scheduler = Scheduler(connection=self.testconn)

    def test_birth_and_death_registration(self):
        """
        When scheduler registers it's birth, besides creating a key, it should
        also set an expiry that's a few seconds longer than it's polling
        interval so it automatically expires if scheduler is unexpectedly
        terminated.
        """
        key = Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=20)
        scheduler.register_birth()
        self.assertIn(key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(key), 30)
        self.assertFalse(self.testconn.hexists(key, 'death'))
        # a second birth registration must be rejected
        self.assertRaises(ValueError, scheduler.register_birth)
        scheduler.register_death()
        self.assertTrue(self.testconn.hexists(key, 'death'))

    def test_create_job(self):
        """
        Ensure that jobs are created properly.
        """
        job = self.scheduler._create_job(say_hello, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job, job_from_queue)
        self.assertEqual(job_from_queue.func, say_hello)

    def test_job_not_persisted_if_commit_false(self):
        """
        Ensure jobs are only saved to Redis if commit=True.
        """
        job = self.scheduler._create_job(say_hello, commit=False)
        self.assertEqual(self.testconn.hgetall(job.key), {})

    def test_create_scheduled_job(self):
        """
        Ensure that scheduled jobs are put in the scheduler queue with the right score
        """
        scheduled_time = datetime.utcnow()
        job = self.scheduler.enqueue_at(scheduled_time, say_hello)
        self.assertEqual(job, Job.fetch(job.id, connection=self.testconn))
        self.assertIn(job.id,
                      tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(scheduled_time))

    def test_enqueue_in(self):
        """
        Ensure that jobs have the right scheduled time.
        """
        right_now = datetime.utcnow()
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertIn(job.id,
                      tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))
        time_delta = timedelta(hours=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))

    def test_get_jobs(self):
        """
        Ensure get_jobs() returns all jobs until the specified time.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs(now))
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertIn(job, self.scheduler.get_jobs(timedelta(hours=1, seconds=1)))
        self.assertIn(job, [j[0] for j in self.scheduler.get_jobs(with_times=True)])
        self.assertIsInstance(self.scheduler.get_jobs(with_times=True)[0][1], datetime)
        self.assertNotIn(job, self.scheduler.get_jobs(timedelta(minutes=59, seconds=59)))

    def test_get_jobs_to_queue(self):
        """
        Ensure that jobs scheduled the future are not queued.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertNotIn(job, self.scheduler.get_jobs_to_queue())

    def test_enqueue_job(self):
        """
        When scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        """
        now = datetime.utcnow()
        queue_name = 'foo'
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)
        job = scheduler.enqueue_at(now, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job,
                         tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10)))
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)

    def test_job_membership(self):
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler)
        self.assertIn(job.id, self.scheduler)
        self.assertNotIn("non-existing-job-id", self.scheduler)

    def test_cancel_scheduled_job(self):
        """
        When scheduled job is canceled, make sure:
        - Job is removed from the sorted set of scheduled jobs
        """
        # schedule a job to be enqueued one minute from now
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        # cancel the scheduled job and check that it's gone from the set
        self.scheduler.cancel(job)
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_change_execution_time(self):
        """
        Ensure ``change_execution_time`` is called, ensure that job's score is updated
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
        new_date = datetime(2010, 1, 1)
        self.scheduler.change_execution_time(job, new_date)
        self.assertEqual(to_unix(new_date),
                         self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))
        self.scheduler.cancel(job)
        # changing the time of a canceled job must fail
        self.assertRaises(ValueError, self.scheduler.change_execution_time, job, new_date)

    def test_args_kwargs_are_passed_correctly(self):
        """
        Ensure that arguments and keyword arguments are properly saved to jobs.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_in(time_delta, simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

    def test_enqueue_is_deprecated(self):
        """
        Ensure .enqueue() throws a DeprecationWarning
        """
        with warnings.catch_warnings(record=True) as w:
            # Enable all warnings
            warnings.simplefilter("always")
            job = self.scheduler.enqueue(datetime.utcnow(), say_hello)
            self.assertEqual(1, len(w))
            self.assertEqual(w[0].category, DeprecationWarning)

    def test_enqueue_periodic(self):
        """
        Ensure .enqueue_periodic() throws a DeprecationWarning
        """
        with warnings.catch_warnings(record=True) as w:
            # Enable all warnings
            warnings.simplefilter("always")
            job = self.scheduler.enqueue_periodic(datetime.utcnow(), 1, None, say_hello)
            self.assertEqual(1, len(w))
            self.assertEqual(w[0].category, DeprecationWarning)

    def test_interval_and_repeat_persisted_correctly(self):
        """
        Ensure that interval and repeat attributes get correctly saved in Redis.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=10, repeat=11)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['interval'], 10)
        self.assertEqual(job_from_queue.meta['repeat'], 11)

    def test_repeat_without_interval_raises_error(self):
        # Ensure that an error is raised if repeat is specified without interval
        def create_job():
            self.scheduler.schedule(datetime.utcnow(), say_hello, repeat=11)
        self.assertRaises(ValueError, create_job)

    def test_job_with_intervals_get_rescheduled(self):
        """
        Ensure jobs with interval attribute are put back in the scheduler
        """
        time_now = datetime.utcnow()
        interval = 10
        job = self.scheduler.schedule(time_now, say_hello, interval=interval)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
                      tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(time_now) + interval)

        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, None, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
                      tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(time_now) + interval)

    def test_job_with_repeat(self):
        """
        Ensure jobs with repeat attribute are put back in the scheduler
        X (repeat) number of times
        """
        time_now = datetime.utcnow()
        interval = 10
        # If job is repeated once, the job shouldn't be put back in the queue
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=1)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
                         tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=2)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
                      tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
                         tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        time_now = datetime.utcnow()
        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, 1, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
                         tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.enqueue_periodic(time_now, interval, 2, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
                      tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
                         tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_missing_jobs_removed_from_scheduler(self):
        """
        Ensure jobs that don't exist when queued are removed from the scheduler.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello)
        job.cancel()
        self.scheduler.get_jobs_to_queue()
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_periodic_jobs_sets_ttl(self):
        """
        Ensure periodic jobs set result_ttl to infinite.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.result_ttl, -1)

    def test_run(self):
        """
        Check correct signal handling in Scheduler.run().
        """
        def send_stop_signal():
            """
            Sleep for 1 second, then send a INT signal to ourself, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)
        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, self.scheduler.run)
        thread.join()

    def test_scheduler_w_o_explicit_connection(self):
        """
        Ensure instantiating Scheduler w/o explicit connection works.
        """
        s = Scheduler()
        self.assertEqual(s.connection, self.testconn)

    def test_no_functions_from__main__module(self):
        """
        Ensure functions from the __main__ module are not accepted for scheduling.
        """
        def dummy():
            return 1
        # Fake __main__ module function
        dummy.__module__ = "__main__"
        self.assertRaises(ValueError, self.scheduler._create_job, dummy)
# BUGFIX: Queue, Scheduler, datetime and timedelta were referenced below but
# never imported, so this script crashed with NameError on startup.
from datetime import datetime, timedelta

from rq import Queue
from rq_scheduler import Scheduler

from rq_test.rq_test1 import redis_conn
from rq_test.worker import test_3, test_4, test_2

# One scheduler per target queue (default / 'low' / 'high').
queue = Queue(connection=redis_conn)
queue1 = Queue('low', connection=redis_conn)
queue2 = Queue('high', connection=redis_conn)
scheduler = Scheduler(queue=queue, connection=redis_conn)
scheduler1 = Scheduler(queue=queue1, connection=redis_conn)
scheduler2 = Scheduler(queue=queue2, connection=redis_conn)


def schedule():
    """Register test_3 as a periodic job on the default queue."""
    scheduler.schedule(
        scheduled_time=datetime.utcnow(),  # Time for first execution
        func=test_3,                       # Function to be queued
        interval=3,                        # Seconds between repeat invocations
        repeat=None                        # None means repeat forever
    )


if __name__ == "__main__":
    # schedule()
    scheduler.enqueue_in(timedelta(seconds=10), test_2, '大爷好')
    scheduler1.enqueue_in(timedelta(minutes=1), test_3, '大爷慢走')
    scheduler2.enqueue_in(timedelta(seconds=15), test_4, '大爷常来')
    # list_of_job_instances = scheduler.get_jobs()
    # for job in list_of_job_instances:
    #     scheduler.cancel(job)
    # print('scheduler.get_jobs() = ', scheduler.get_jobs())