Example #1
def cancel_all(queue_name):
    scheduler = Scheduler()
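    # cancel every scheduled job whose origin queue matches queue_name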
    for job in scheduler.get_jobs():
        if job.origin == queue_name:
            scheduler.cancel(job)

    return dict(status='OK')
Example #2
def cancel_scheduled_task(request):
    job_id = request.GET.get('job_id')
    from rq_scheduler import Scheduler

    scheduler = Scheduler('parser')
    scheduler.cancel(job_id)
    return HttpResponse('Success')
Example #3
def stop_job(task_id, job_id):
    logger = g.logger.bind(operation="stop", task_id=task_id, job_id=job_id)

    logger.debug("Getting job...")
    job = Job.get_by_id(task_id=task_id, job_id=job_id)

    if job is None:
        logger.error("Job not found in task.")
        abort(404)

        return

    execution = job.get_last_execution()

    if execution is not None and execution.status == JobExecution.Status.running:
        logger.debug("Stopping current execution...")
        executor = current_app.load_executor()
        executor.stop_job(job.task, job, execution)
        logger.debug("Current execution stopped.")

    scheduler = Scheduler("jobs", connection=current_app.redis)

    if "enqueued_id" in job.metadata and job.metadata[
            "enqueued_id"] in scheduler:
        scheduler.cancel(job.metadata["enqueued_id"])
        job.scheduled = False
        job.save()

    logger.debug("Job stopped.")

    return get_job_summary(task_id, job_id)
Example #4
def update_scheduled_connection(connection):
    ''' Schedule a new scrape of the connection source because either
    the interval was changed
    or the last job finished and the next one needs to be scheduled.
    '''
    repeating_task = connection.schedule
    # check to see if schedule is available -- abort if not
    # note that ready_to_connect does not verify this
    if not repeating_task.interval:
        return False
    
    # connect to the rq scheduler
    redis_config = app.config['REDIS_CONFIG']
    use_connection(Redis(redis_config['host'], redis_config['port']
            , password=redis_config['password']))
    scheduler = Scheduler()
    
    # see if this schedule had a job that was already enqueued
    if repeating_task.next_task_id:
        # instantiate the job
        job = Job(id=repeating_task.next_task_id)
        # cancel the old job
        scheduler.cancel(job)

    # determine how many seconds to wait
    delay = _calculate_schedule_delay(repeating_task.interval)
    
    # start a new job
    job = scheduler.enqueue_in(datetime.timedelta(seconds=int(delay))
        , connect_to_source, connection.id)
    
    # save this id and when it runs next
    repeating_task.update(set__next_task_id = job.id)
    repeating_task.update(set__next_run_time = (datetime.datetime.utcnow() 
        + datetime.timedelta(seconds=delay)))
Example #5
def cancel_all(queue_name):
    scheduler = Scheduler()
    for job in scheduler.get_jobs():
        if job.origin == queue_name:
            scheduler.cancel(job)

    return dict(status='OK')
Example #6
def cancel_scheduled_task(request):
    job_id = request.GET.get('job_id')
    from rq_scheduler import Scheduler

    scheduler = Scheduler('parser')
    scheduler.cancel(job_id)
    return HttpResponse('Success')
Example #7
def cancel_scheduled_task(request):

    from rq_scheduler import Scheduler
    job_id = request.GET.get('job_id')

    scheduler = Scheduler('default', connection=tasks.redis_conn)
    scheduler.cancel(job_id)
    return HttpResponse('Success')
Example #8
def create_schedules():
    from towerdashboard.jobs import refresh_github_branches
    scheduler = Scheduler(connection=Redis('redis'))
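    # drop any previously scheduled jobs so the refresh job is not registered twice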
    for j in scheduler.get_jobs():
        scheduler.cancel(j)

    scheduler.schedule(scheduled_time=datetime.utcnow(),
                       func=refresh_github_branches,
                       interval=120, repeat=None, result_ttl=120)
Example #9
def stop_synchronization(job_name):

    r = StrictRedis()
    scheduler = Scheduler(connection=r)

    if job_name not in scheduler:
        logger.error("job not exists")
        return

    scheduler.cancel(job_name)
Example #10
def main():

    parser = argparse.ArgumentParser()

    parser.add_argument("action",
                        choices=['show', 'remove', 'purge'],
                        help="Which action to run.")

    parser.add_argument(
        "object",
        help=
        "Which object to remove (if removing). Add 'all' where this argument is unused."
    )

    parser.add_argument(
        "-e",
        '--env',
        choices=['development', 'test', 'production'],
        help=
        "Run within a specific environment. Otherwise run under the environment defined in the environment variable CS_ENV"
    )

    args = parser.parse_args()
    if args.env is not None:
        os.environ['CS_ENV'] = args.env

    queue_name = os.environ['CS_ENV']
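    # the scheduler queue is named after the current environment (CS_ENV)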
    scheduler = Scheduler(queue_name=os.environ['CS_ENV'], connection=Redis())

    if (args.action == "show"):
        print("\n")
        print("=================================")
        print("  Job Schedule For {0}".format(os.environ['CS_ENV']))
        print("=================================")
        print("\n")
        for job in scheduler.get_jobs(until=timedelta(hours=24),
                                      with_times=True):
            print("ID: {1}\n    Job: {0}\n    Time: {2}\n".format(
                job[0].description, job[0].id, job[1]))
    elif (args.action == "remove"):
        if (args.object is None):
            print("Please specify the job to remove")
        else:
            jobs = scheduler.get_jobs()
            for job in jobs:
                if (args.object == job.id):
                    scheduler.cancel(job.id)
                    print("Job {0} cancelled from {1}".format(
                        args.object, os.environ['CS_ENV']))
    elif (args.action == "purge"):
        count = 0
        for job in scheduler.get_jobs():
            count += 1
            scheduler.cancel(job.id)
        print("Purged {0} jobs from {1}".format(count, os.environ['CS_ENV']))
Example #11
def cancel_job(conn, job_id):

    scheduler = Scheduler(connection=conn)

    if job_id in scheduler:
        scheduler.cancel(job_id)
        print " Job: " + job_id + " deleted"

    else:
        print " Job: " + str(job_id) + " not scheduled"

    return None
Example #12
def register_scheduler():
    scheduler = Scheduler('lidarts-bulk', connection=Redis())
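    # clear any previously scheduled jobs before registering the periodic bulk update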
    list_of_job_instances = scheduler.get_jobs()
    for job in list_of_job_instances:
        scheduler.cancel(job)
    scheduler.schedule(
        scheduled_time=datetime.utcnow(),
        func='lidarts.tasks.bulk_update_last_seen',
        interval=5,
        repeat=None,
        ttl=10,
    )
Example #13
def kill_schedule(channel, verbose=True):
    try:
        scheduler = Scheduler(channel, connection=RCONN)
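        # get_jobs(with_times=True) yields (job, scheduled_time) pairs; cancel each job by its id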
        jobs_and_times = scheduler.get_jobs(with_times=True)
        for job in jobs_and_times:
            print("job", job)
            scheduler.cancel(job[0].id)
        if verbose:
            print("All Jobs Killed")
        return True
    except Exception as e:
        print("Errors in killing jobs", e)
        return False
Example #14
    def partial_update(self, request, *args, **kwargs):
        print("PERFORM PARTIAL UPDATE !!!!!!!!")
        response = super(PeriodicJobViewSet,
                         self).partial_update(request, *args, **kwargs)

        script = self.get_object()
        print("------------>", script.interval)

        # scheduler add or delete

        print("scheduler !!!")

        job_id = get_job_id(script.id)
        scheduler = Scheduler("default", connection=Redis())

        print("job_id", job_id)
        job = None
        try:
            redis = Redis()
            job = Job.fetch(job_id, connection=redis)
            print("there is a job", job)

            scheduler.cancel(job)
            print("Cancel the job")

        except NoSuchJobError as e:
            job = None
        finally:
            job = None

        if job is None and script.interval > 0:
            print("Add for schedule ...")
            scheduler.schedule(
                scheduled_time=datetime.utcnow(),  # Time for first execution, in UTC timezone
                func=func3,  # Function to be queued
                args=[{"script_id": script.id}],  # Arguments passed into function when executed
                id=job_id,
                interval=script.interval,  # Time before the function is called again, in seconds
                repeat=None,  # Repeat this number of times (None means repeat forever)
                result_ttl=script.interval * 2,
            )
            # either do not set a result_ttl value or set a value larger than the interval

        return response
Example #15
    def __remove_from_scheduler(self):
        rq_job_ids = self._rq_job_ids
        if rq_job_ids:
            queue = Queue(SchedulerDB.submit_queue_name, connection=self.con)
            scheduler = Scheduler(queue=queue, connection=self.con)
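            # cancel each scheduled rq job and delete its underlying job hash from Redis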
            for rq_job_id in rq_job_ids:
                try:
                    scheduler.cancel(rq_job_id)
                    RqJob.fetch(rq_job_id, connection=self.con).delete()
                    self.logger.debug(
                        f"Rq job {rq_job_id} removed from the rq scheduler")
                except NoSuchJobError:
                    pass

            self.logger.debug(f"{self} has been removed from the rq scheduler")
Example #16
def start_scheduler(redis_url, redis_password=None, queue_name='job_scheduler_queue'):
    queue = redis_queue(redis_url, redis_password, queue_name)
    scheduler = Scheduler(queue_name=queue.name, connection=queue.connection)

    queue.empty()
    for job in scheduler.get_jobs():
        scheduler.cancel(job)
        logger.info(f"Removed old job {job} from scheduler.")

    # add jobs to scheduler
    job = scheduler.cron(
        cron_string="* * * * *",  # once a minute
        func=log_review,
        args=[datetime.now(), choice(['Alice', 'Bob', 'Carol', 'Dave'])],
        queue_name=queue.name,
        repeat=None
    )
    logger.info(f"Added job {job}")

    return scheduler
Example #17
def test_schedule_jobs(mocker, queue, jobid):
    sch = Scheduler(queue=queue, connection=queue.connection)
    sch.cancel = mocker.MagicMock()
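    # cancel is mocked so the test can assert that re-running schedule_jobs never cancels jobs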
    jobs.schedule_jobs(sch)
    assert jobid in sch
    assert len(list(sch.get_jobs())) == 1
    # running again should have no effect
    jobs.schedule_jobs(sch)
    assert jobid in sch
    assert len(list(sch.get_jobs())) == 1
    assert not sch.cancel.called
Example #18
def perform_stop_job_execution(job, execution, logger, stop_schedule=True):
    if execution is None:
        if not job.executions:
            msg = "No executions found in job."

            return (
                False,
                return_error(msg,
                             "stop_job_execution",
                             status=400,
                             logger=logger),
            )

        execution = job.get_last_execution()

    if execution is not None and execution.status == JobExecution.Status.running:
        logger.debug("Stopping current execution...")
        executor = current_app.executor
        executor.stop_job(job.task, job, execution)
        logger.debug("Current execution stopped.")

    if "retries" in job.metadata:
        job.metadata["retry_count"] = job.metadata["retries"] + 1
        job.save()

    scheduler = Scheduler("jobs", connection=current_app.redis)

    if (stop_schedule and "enqueued_id" in job.metadata
            and job.metadata["enqueued_id"] in scheduler):
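        # remove the pending scheduler entry and mark the job as no longer scheduled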
        scheduler.cancel(job.metadata["enqueued_id"])
        job.scheduled = False

    if execution.error is None:
        execution.error = ""
    execution.error += "\nUser stopped job execution manually."
    execution.status = JobExecution.Status.failed
    job.save()

    logger.debug("Job stopped.")

    return True, None
Example #19
def setup_scheduler(func, repeat_every=60):
    r = get_connection()
    scheduled_job_id = r.get(KEY)

    scheduler = Scheduler(connection=r)
    if scheduled_job_id:
        logger.info(f'Canceling old job {scheduled_job_id}')
        scheduler.cancel(scheduled_job_id)  # cancel the old job before scheduling a new one

    job = scheduler.schedule(
        scheduled_time=datetime.utcnow(),  # Time for first execution, in UTC timezone
        func=func,  # Function to be queued
        interval=repeat_every,  # Time before the function is called again, in seconds
        repeat=None,  # Repeat this number of times (None means repeat forever)
    )
    logger.info("Scheduled function %s to be executed every %s seconds" %
                (func.__name__, repeat_every))
    r.set(KEY, job.id)
Example #20
def update_scheduled_send(schedule_id):
    ''' Schedule a new sending because either
    the interval was changed
    or the last job has finished and the next one needs to be scheduled.
    '''
    schedules = Schedule.objects(id=schedule_id)
    if not schedules:
        return False
    schedule = schedules[0]

    # confirm that schedule is valid
    if not schedule.interval:
        return False
    
    # connect to the rq scheduler
    redis_config = app.config['REDIS_CONFIG']
    use_connection(Redis(redis_config['host'], redis_config['port']
            , password=redis_config['password']))
    scheduler = Scheduler()

    # see if this schedule had a job that was already enqueued
    if schedule.next_task_id:
        # instantiate the job
        job = Job(id=schedule.next_task_id)
        # cancel the old job
        # tried rescheduling but that was not working
        scheduler.cancel(job)
    
    # determine how many seconds to wait
    delay = _calculate_schedule_delay(schedule.interval)

    # start a new job
    job = scheduler.enqueue_in(datetime.timedelta(seconds=int(delay))
        , send_scheduled_report, schedule.id)

    # save the id of this job and when it next runs
    schedule.update(set__next_task_id = job.id)
    schedule.update(set__next_run_time = (datetime.datetime.utcnow() 
        + datetime.timedelta(seconds=delay)))
Example #21
def rq_check_job_status_scheduler(job_ids, register_following_job_callback,
                                  redis_url):
    jobs = []
    status = True
    rq_connection = Redis.from_url(redis_url)
    try:
        for jid in job_ids:
            _j = Job.fetch(jid, connection=rq_connection)
            jobs.append(_j)
            print('Checking job', _j.id)
    except NoSuchJobError as e:
        status = False
    except Exception as e:
        status = False

    # print(jobs)
    try:
        if status:
            for job in jobs:
                if job.status != 'finished':
                    print('not finished yet')
                    return

            register_following_job_callback(redis_url)
            scheduler = Scheduler('high', connection=rq_connection)  # Get a scheduler for the "high" queue
            scheduler.cancel(get_current_job())
        else:
            raise Exception('One or more downloads failed')
    except Exception as e:
        print(e)
        import traceback
        traceback.print_tb(e.__traceback__)
        print("Canceling scheduler", get_current_job().id)
        scheduler = Scheduler('high', connection=rq_connection)  # Get a scheduler for the "high" queue
        scheduler.cancel(get_current_job())
Example #22
class RqClient:

    def __init__(self, conf: RqConfig, prefix: str = ''):
        self.redis_conn = Redis(host=conf.HOST, port=conf.PORT, db=conf.DB)
        self.queue = Queue(connection=self.redis_conn)
        self.prefix = prefix
        self.scheduler = Scheduler(connection=self.redis_conn, queue=self.queue)
        self.scheduler_conf_path = conf.SCHEDULER_CONF_PATH
        self.control = Control(self.redis_conn)

    def init_scheduler(self):
        # remove old scheduled tasks
        for job in self.scheduler.get_jobs():
            self.scheduler.cancel(job)

        # create new tasks from config file
        if self.scheduler_conf_path:
            with open(self.scheduler_conf_path) as f:
                for entry in json.load(f):
                    self.scheduler.cron(
                        entry['schedule'],
                        f'{self.prefix}.{entry["task"]}',
                        kwargs=entry['kwargs'] if 'kwargs' in entry else None
                    )

    def send_task(self, name, args=None, time_limit=None, soft_time_limit=None):
        try:
            job = self.queue.enqueue(f'{self.prefix}.{name}', ttl=time_limit, args=args)
            return ResultWrapper(job)
        except Exception as ex:
            logging.getLogger(__name__).error(ex)

    def AsyncResult(self, ident):
        try:
            return ResultWrapper(Job.fetch(ident, connection=self.redis_conn))
        except NoSuchJobError:
            logging.getLogger(__name__).warning(f'Job {ident} not found')
            return None
Example #23
class TestScheduler(RQTestCase):
    def setUp(self):
        super(TestScheduler, self).setUp()
        self.scheduler = Scheduler(connection=self.testconn)

    def test_birth_and_death_registration(self):
        key = Scheduler.scheduler_key
        self.assertNotIn(key, self.testconn.keys('*'))
        scheduler = Scheduler(connection=self.testconn)
        scheduler.register_birth()
        self.assertIn(key, self.testconn.keys('*'))
        self.assertFalse(self.testconn.hexists(key, 'death'))
        self.assertRaises(ValueError, scheduler.register_birth)
        scheduler.register_death()
        self.assertTrue(self.testconn.hexists(key, 'death'))

    def test_create_job(self):
        """
        Ensure that jobs are created properly.
        """
        job = self.scheduler._create_job(say_hello, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job, job_from_queue)
        self.assertEqual(job_from_queue.func, say_hello)

    def test_job_not_persisted_if_commit_false(self):
        """
        Ensure jobs are only saved to Redis if commit=True.
        """
        job = self.scheduler._create_job(say_hello, commit=False)
        self.assertEqual(self.testconn.hgetall(job.key), {})

    def test_create_scheduled_job(self):
        """
        Ensure that scheduled jobs are put in the scheduler queue with the right score
        """
        scheduled_time = datetime.now()
        job = self.scheduler.enqueue_at(scheduled_time, say_hello)
        self.assertEqual(job, Job.fetch(job.id, connection=self.testconn))
        self.assertIn(
            job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            int(scheduled_time.strftime('%s')))

    def test_enqueue_in(self):
        """
        Ensure that jobs have the right scheduled time.
        """
        right_now = datetime.now()
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertIn(
            job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            int((right_now + time_delta).strftime('%s')))
        time_delta = timedelta(hours=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            int((right_now + time_delta).strftime('%s')))

    def test_get_jobs_to_queue(self):
        """
        Ensure that jobs scheduled in the future are not queued.
        """
        now = datetime.now()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertNotIn(job, self.scheduler.get_jobs_to_queue())

    def test_enqueue_job(self):
        """
        When scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        """
        now = datetime.now()
        queue_name = 'foo'
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)

        job = scheduler.enqueue_at(now, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(
            job, self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10))
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)

    def test_cancel_scheduled_job(self):
        """
        When scheduled job is canceled, make sure:
        - Job is removed from the sorted set of scheduled jobs
        """
        # schedule a job to be enqueued one minute from now
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        # cancel the scheduled job and check that it's gone from the set
        self.scheduler.cancel(job)
        self.assertNotIn(
            job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))

    def test_change_execution_time(self):
        """
        Ensure that calling ``change_execution_time`` updates the job's score.
        """
        job = self.scheduler.enqueue_at(datetime.now(), say_hello)
        new_date = datetime(2010, 1, 1)
        self.scheduler.change_execution_time(job, new_date)
        self.assertEqual(
            int(new_date.strftime('%s')),
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))

    def test_args_kwargs_are_passed_correctly(self):
        """
        Ensure that arguments and keyword arguments are properly saved to jobs.
        """
        job = self.scheduler.enqueue_at(datetime.now(), simple_addition, 1, 1,
                                        1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_at(datetime.now(),
                                        simple_addition,
                                        z=1,
                                        y=1,
                                        x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_at(datetime.now(),
                                        simple_addition,
                                        1,
                                        z=1,
                                        y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1, ))

        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_in(time_delta,
                                        simple_addition,
                                        z=1,
                                        y=1,
                                        x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_in(time_delta,
                                        simple_addition,
                                        1,
                                        z=1,
                                        y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1, ))

    def test_interval_and_repeat_persisted_correctly(self):
        """
        Ensure that interval and repeat attributes get correctly saved in Redis.
        """
        job = self.scheduler.enqueue(datetime.now(),
                                     say_hello,
                                     interval=10,
                                     repeat=11)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(int(job_from_queue.interval), 10)
        self.assertEqual(int(job_from_queue.repeat), 11)

    def test_repeat_without_interval_raises_error(self):
        # Ensure that an error is raised if repeat is specified without interval
        def create_job():
            self.scheduler.enqueue(datetime.now(), say_hello, repeat=11)

        self.assertRaises(ValueError, create_job)

    def test_job_with_intervals_get_rescheduled(self):
        """
        Ensure jobs with interval attribute are put back in the scheduler
        """
        time_now = datetime.now()
        interval = 10
        job = self.scheduler.enqueue(time_now, say_hello, interval=interval)
        self.scheduler.enqueue_job(job)
        self.assertIn(
            job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            int(time_now.strftime('%s')) + interval)

        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, None,
                                              say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(
            job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            int(time_now.strftime('%s')) + interval)

    def test_job_with_repeat(self):
        """
        Ensure jobs with repeat attribute are put back in the scheduler
        X (repeat) number of times
        """
        time_now = datetime.now()
        interval = 10
        # If job is repeated once, the job shouldn't be put back in the queue
        job = self.scheduler.enqueue(time_now,
                                     say_hello,
                                     interval=interval,
                                     repeat=1)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(
            job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.enqueue(time_now,
                                     say_hello,
                                     interval=interval,
                                     repeat=2)
        self.scheduler.enqueue_job(job)
        self.assertIn(
            job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(
            job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))

        time_now = datetime.now()
        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, 1, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(
            job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.enqueue_periodic(time_now, interval, 2, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(
            job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(
            job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))

    def test_missing_jobs_removed_from_scheduler(self):
        """
        Ensure jobs that don't exist when queued are removed from the scheduler.
        """
        job = self.scheduler.enqueue(datetime.now(), say_hello)
        job.cancel()
        self.scheduler.get_jobs_to_queue()
        self.assertNotIn(
            job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))

    def test_periodic_jobs_sets_ttl(self):
        """
        Ensure periodic jobs set result_ttl to infinite.
        """
        job = self.scheduler.enqueue(datetime.now(), say_hello, interval=5)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.result_ttl, -1)
Example #24
def cancel_job_view(job_id):
    scheduler = Scheduler()
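    # remove the scheduled job identified by job_id from the scheduler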
    scheduler.cancel(job_id)
    return dict(status='OK')
Example #25
class TestScheduler(RQTestCase):

    def setUp(self):
        super(TestScheduler, self).setUp()
        self.scheduler = Scheduler(connection=self.testconn)
    
    def test_birth_and_death_registration(self):
        """
        When the scheduler registers its birth, besides creating a key, it should
        also set an expiry that's a few seconds longer than its polling
        interval so it automatically expires if the scheduler is unexpectedly
        terminated.
        """
        key = Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=20)
        scheduler.register_birth()
        self.assertIn(key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(key), 30)
        self.assertFalse(self.testconn.hexists(key, 'death'))
        self.assertRaises(ValueError, scheduler.register_birth)
        scheduler.register_death()
        self.assertTrue(self.testconn.hexists(key, 'death'))

    def test_create_job(self):
        """
        Ensure that jobs are created properly.
        """
        job = self.scheduler._create_job(say_hello, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job, job_from_queue)
        self.assertEqual(job_from_queue.func, say_hello)

    def test_job_not_persisted_if_commit_false(self):
        """
        Ensure jobs are only saved to Redis if commit=True.
        """
        job = self.scheduler._create_job(say_hello, commit=False)
        self.assertEqual(self.testconn.hgetall(job.key), {})

    def test_create_scheduled_job(self):
        """
        Ensure that scheduled jobs are put in the scheduler queue with the right score
        """
        scheduled_time = datetime.utcnow()
        job = self.scheduler.enqueue_at(scheduled_time, say_hello)
        self.assertEqual(job, Job.fetch(job.id, connection=self.testconn))
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(scheduled_time))

    def test_enqueue_in(self):
        """
        Ensure that jobs have the right scheduled time.
        """
        right_now = datetime.utcnow()
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertIn(job.id,
                      tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))
        time_delta = timedelta(hours=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))

    def test_get_jobs(self):
        """
        Ensure get_jobs() returns all jobs until the specified time.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs(now))
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertIn(job, self.scheduler.get_jobs(timedelta(hours=1, seconds=1)))
        self.assertIn(job, [j[0] for j in self.scheduler.get_jobs(with_times=True)])
        self.assertIsInstance(self.scheduler.get_jobs(with_times=True)[0][1], datetime)
        self.assertNotIn(job, self.scheduler.get_jobs(timedelta(minutes=59, seconds=59)))
    
    def test_get_jobs_to_queue(self):
        """
        Ensure that jobs scheduled in the future are not queued.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertNotIn(job, self.scheduler.get_jobs_to_queue())

    def test_enqueue_job(self):
        """
        When scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        """
        now = datetime.utcnow()
        queue_name = 'foo'
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)

        job = scheduler.enqueue_at(now, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job, tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10)))
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)

    def test_job_membership(self):
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler)
        self.assertIn(job.id, self.scheduler)
        self.assertNotIn("non-existing-job-id", self.scheduler)

    def test_cancel_scheduled_job(self):
        """
        When scheduled job is canceled, make sure:
        - Job is removed from the sorted set of scheduled jobs
        """
        # schedule a job to be enqueued one minute from now
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        # cancel the scheduled job and check that it's gone from the set
        self.scheduler.cancel(job)
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_change_execution_time(self):
        """
        Ensure that calling ``change_execution_time`` updates the job's score.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
        new_date = datetime(2010, 1, 1)
        self.scheduler.change_execution_time(job, new_date)
        self.assertEqual(to_unix(new_date),
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))
        self.scheduler.cancel(job)
        self.assertRaises(ValueError, self.scheduler.change_execution_time, job, new_date)

    def test_args_kwargs_are_passed_correctly(self):
        """
        Ensure that arguments and keyword arguments are properly saved to jobs.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_in(time_delta, simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

    def test_enqueue_is_deprecated(self):
        """
        Ensure .enqueue() throws a DeprecationWarning
        """
        with warnings.catch_warnings(record=True) as w:
            # Enable all warnings
            warnings.simplefilter("always")
            job = self.scheduler.enqueue(datetime.utcnow(), say_hello)
            self.assertEqual(1, len(w))
            self.assertEqual(w[0].category, DeprecationWarning)

    def test_enqueue_periodic(self):
        """
        Ensure .enqueue_periodic() throws a DeprecationWarning
        """
        with warnings.catch_warnings(record=True) as w:
            # Enable all warnings
            warnings.simplefilter("always")
            job = self.scheduler.enqueue_periodic(datetime.utcnow(), 1, None, say_hello)
            self.assertEqual(1, len(w))
            self.assertEqual(w[0].category, DeprecationWarning)

    def test_interval_and_repeat_persisted_correctly(self):
        """
        Ensure that interval and repeat attributes get correctly saved in Redis.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=10, repeat=11)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['interval'], 10)
        self.assertEqual(job_from_queue.meta['repeat'], 11)

    def test_repeat_without_interval_raises_error(self):
        # Ensure that an error is raised if repeat is specified without interval
        def create_job():
            self.scheduler.schedule(datetime.utcnow(), say_hello, repeat=11)
        self.assertRaises(ValueError, create_job)

    def test_job_with_intervals_get_rescheduled(self):
        """
        Ensure jobs with interval attribute are put back in the scheduler
        """
        time_now = datetime.utcnow()
        interval = 10
        job = self.scheduler.schedule(time_now, say_hello, interval=interval)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(time_now) + interval)

        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, None, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(time_now) + interval)

    def test_job_with_repeat(self):
        """
        Ensure jobs with repeat attribute are put back in the scheduler
        X (repeat) number of times
        """
        time_now = datetime.utcnow()
        interval = 10
        # If job is repeated once, the job shouldn't be put back in the queue
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=1)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=2)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        time_now = datetime.utcnow()
        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, 1, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.enqueue_periodic(time_now, interval, 2, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_missing_jobs_removed_from_scheduler(self):
        """
        Ensure jobs that don't exist when queued are removed from the scheduler.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello)
        job.cancel()
        self.scheduler.get_jobs_to_queue()
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_periodic_jobs_sets_ttl(self):
        """
        Ensure periodic jobs set result_ttl to infinite.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.result_ttl, -1)

    def test_run(self):
        """
        Check correct signal handling in Scheduler.run().
        """
        def send_stop_signal():
            """
            Sleep for 1 second, then send an INT signal to ourselves, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)
        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, self.scheduler.run)
        thread.join()

    def test_scheduler_w_o_explicit_connection(self):
        """
        Ensure instantiating Scheduler w/o explicit connection works.
        """
        s = Scheduler()
        self.assertEqual(s.connection, self.testconn)

    def test_no_functions_from__main__module(self):
        """
        Ensure functions from the __main__ module are not accepted for scheduling.
        """
        def dummy():
            return 1
        # Fake __main__ module function
        dummy.__module__ = "__main__"
        self.assertRaises(ValueError, self.scheduler._create_job, dummy)
Example #26
class RqClient:
    def __init__(self, conf: RqConfig, prefix: str = ''):
        self.redis_conn = Redis(host=conf.HOST, port=conf.PORT, db=conf.DB)
        self.queue = Queue(connection=self.redis_conn)
        self.prefix = prefix
        self.scheduler = Scheduler(connection=self.redis_conn,
                                   queue=self.queue)
        self.scheduler_conf_path = conf.SCHEDULER_CONF_PATH
        self.control = Control(self.redis_conn)

    def init_scheduler(self):
        # remove old scheduled tasks
        for job in self.scheduler.get_jobs():
            self.scheduler.cancel(job)

        # create new tasks from config file
        if self.scheduler_conf_path:
            with open(self.scheduler_conf_path) as f:
                for entry in json.load(f):
                    self.scheduler.cron(
                        entry['schedule'],
                        f'{self.prefix}.{entry["task"]}',
                        kwargs=entry['kwargs'] if 'kwargs' in entry else None)

    @staticmethod
    def _resolve_limit(softl, hardl):
        if softl is not None and hardl is not None:
            return min(softl, hardl)
        elif softl is not None:
            return softl
        elif hardl is not None:
            return hardl
        return None

    def send_task(self,
                  name,
                  args=None,
                  time_limit=None,
                  soft_time_limit=None):
        """
        Send a task to the worker.

        Please note that Rq does not know hard vs. soft time limit. In case both
        values are filled in (time_limit, soft_time_limit), the smaller one is
        selected. Otherwise, the non-None is applied.
        """
        time_limit = self._resolve_limit(time_limit, soft_time_limit)
        try:
            job = self.queue.enqueue(f'{self.prefix}.{name}',
                                     job_timeout=time_limit,
                                     args=args)
            return ResultWrapper(job)
        except Exception as ex:
            logging.getLogger(__name__).error(ex)

    def AsyncResult(self, ident):
        try:
            return ResultWrapper(Job.fetch(ident, connection=self.redis_conn))
        except NoSuchJobError:
            logging.getLogger(__name__).warning(f'Job {ident} not found')
            return None
Example #27
def schedule_delete(schedule_inst):
    scheduler = Scheduler(connection=Redis())  # Get a scheduler for the "default" queue
    scheduler.cancel(schedule_inst)
    return "Deleted"
Example #28
class RqClient(AbstractBgClient):
    def __init__(self, conf: RqConfig, prefix: str = ''):
        self.redis_conn = Redis(host=conf.HOST, port=conf.PORT, db=conf.DB)
        self.queue = Queue(connection=self.redis_conn)
        self.prefix = prefix
        self.scheduler = Scheduler(connection=self.redis_conn,
                                   queue=self.queue)
        self.scheduler_conf_path = conf.SCHEDULER_CONF_PATH
        self._control = Control(self.redis_conn)

    def init_scheduler(self):
        # remove old scheduled tasks
        for job in self.scheduler.get_jobs():
            self.scheduler.cancel(job)

        # create new tasks from config file
        if self.scheduler_conf_path:
            with open(self.scheduler_conf_path) as f:
                for entry in json.load(f):
                    self.scheduler.cron(
                        entry['schedule'],
                        f'{self.prefix}.{entry["task"]}',
                        kwargs=entry['kwargs'] if 'kwargs' in entry else None,
                        use_local_timezone=True,
                    )
            logging.getLogger(__name__).info(
                f'Loaded configuration for Rq-scheduler from {self.scheduler_conf_path}'
            )
        else:
            logging.getLogger(__name__).warning(
                'No Rq-scheduler configuration path defined. '
                'Regular system maintenance will be disabled which may lead to disks becoming full.'
            )

    @staticmethod
    def _resolve_limit(softl, hardl):
        if softl is not None and hardl is not None:
            return min(softl, hardl)
        elif softl is not None:
            return softl
        elif hardl is not None:
            return hardl
        return None

    @property
    def control(self):
        return self._control

    def send_task(self,
                  name,
                  ans_type: Type[T],
                  args=None,
                  time_limit=None,
                  soft_time_limit=None) -> ResultWrapper[T]:
        """
        Send a task to the worker.

        Please note that Rq does not know hard vs. soft time limit. In case both
        values are filled in (time_limit, soft_time_limit), the smaller one is
        selected. Otherwise, the non-None is applied.
        """
        time_limit = self._resolve_limit(time_limit, soft_time_limit)
        try:
            job = self.queue.enqueue(f'{self.prefix}.{name}',
                                     job_timeout=time_limit,
                                     args=args)
            return ResultWrapper(job)
        except Exception as ex:
            logging.getLogger(__name__).error(ex)

    def get_task_error(self, task_id):
        try:
            job = Job.fetch(task_id, connection=self.redis_conn)
            if job.get_status() == 'failed':
                return BgCalcError(job.exc_info)
        except NoSuchJobError as ex:
            return CalcTaskNotFoundError(ex)
        return None

    def AsyncResult(self, ident):
        try:
            return ResultWrapper(Job.fetch(ident, connection=self.redis_conn))
        except NoSuchJobError:
            logging.getLogger(__name__).warning(f'Job {ident} not found')
            return None

    def is_wrapped_user_error(self, err):
        return isinstance(err, UserActionException)
Example #29
class TestScheduler(RQTestCase):
    def setUp(self):
        super(TestScheduler, self).setUp()
        self.scheduler = Scheduler(connection=self.testconn)

    def test_acquire_lock(self):
        """
        When the scheduler acquires a lock, besides creating a key, it should
        also set an expiry that's a few seconds longer than its polling
        interval so it automatically expires if the scheduler is unexpectedly
        terminated.
        """
        key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=20)
        self.assertTrue(scheduler.acquire_lock())
        self.assertIn(key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(key), 30)
        scheduler.remove_lock()
        self.assertNotIn(key, tl(self.testconn.keys('*')))

    def test_no_two_schedulers_acquire_lock(self):
        """
        Ensure that no two schedulers can acquire the lock at the
        same time. When removing the lock, only the scheduler which
        originally acquired the lock can remove the lock.
        """
        key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler1 = Scheduler(connection=self.testconn, interval=20)
        scheduler2 = Scheduler(connection=self.testconn, interval=20)
        self.assertTrue(scheduler1.acquire_lock())
        self.assertFalse(scheduler2.acquire_lock())
        self.assertIn(key, tl(self.testconn.keys('*')))
        scheduler2.remove_lock()
        self.assertIn(key, tl(self.testconn.keys('*')))
        scheduler1.remove_lock()
        self.assertNotIn(key, tl(self.testconn.keys('*')))

    def test_create_job(self):
        """
        Ensure that jobs are created properly.
        """
        job = self.scheduler._create_job(say_hello, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job, job_from_queue)
        self.assertEqual(job_from_queue.func, say_hello)

    def test_create_job_with_ttl(self):
        """
        Ensure that TTL is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, ttl=2, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(2, job_from_queue.ttl)

    def test_create_job_with_id(self):
        """
        Ensure that ID is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello,
                                         id='id test',
                                         args=(),
                                         kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('id test', job_from_queue.id)

    def test_create_job_with_description(self):
        """
        Ensure that description is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello,
                                         description='description',
                                         args=(),
                                         kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('description', job_from_queue.description)

    def test_create_job_with_timeout(self):
        """
        Ensure that timeout is passed to RQ.
        """
        timeout = 13
        job = self.scheduler._create_job(say_hello,
                                         timeout=13,
                                         args=(),
                                         kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(timeout, job_from_queue.timeout)

    def test_job_not_persisted_if_commit_false(self):
        """
        Ensure jobs are only saved to Redis if commit=True.
        """
        job = self.scheduler._create_job(say_hello, commit=False)
        self.assertEqual(self.testconn.hgetall(job.key), {})

    def test_create_scheduled_job(self):
        """
        Ensure that scheduled jobs are put in the scheduler queue with the right score
        """
        scheduled_time = datetime.utcnow()
        job = self.scheduler.enqueue_at(scheduled_time, say_hello)
        self.assertEqual(job, Job.fetch(job.id, connection=self.testconn))
        self.assertIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            to_unix(scheduled_time))

    def test_create_job_with_meta(self):
        """
        Ensure that meta information on the job is passed to rq
        """
        expected = {'say': 'hello'}
        job = self.scheduler._create_job(say_hello, meta=expected)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(expected, job_from_queue.meta)

    def test_enqueue_at_sets_timeout(self):
        """
        Ensure that a job scheduled via enqueue_at can be created with
        a custom timeout.
        """
        timeout = 13
        job = self.scheduler.enqueue_at(datetime.utcnow(),
                                        say_hello,
                                        timeout=timeout)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.timeout, timeout)

    def test_enqueue_at_sets_job_id(self):
        """
        Ensure that a job scheduled via enqueue_at can be created with
        a custom job id.
        """
        job_id = 'test_id'
        job = self.scheduler.enqueue_at(datetime.utcnow(),
                                        say_hello,
                                        job_id=job_id)
        self.assertEqual(job.id, job_id)

    def test_enqueue_at_sets_job_ttl(self):
        """
        Ensure that a job scheduled via enqueue_at can be created with a custom job ttl.
        """
        job_ttl = 123456789
        job = self.scheduler.enqueue_at(datetime.utcnow(),
                                        say_hello,
                                        job_ttl=job_ttl)
        self.assertEqual(job.ttl, job_ttl)

    def test_enqueue_at_sets_job_result_ttl(self):
        """
        Ensure that a job scheduled via enqueue_at can be created with a custom result ttl.
        """
        job_result_ttl = 1234567890
        job = self.scheduler.enqueue_at(datetime.utcnow(),
                                        say_hello,
                                        job_result_ttl=job_result_ttl)
        self.assertEqual(job.result_ttl, job_result_ttl)

    def test_enqueue_at_sets_meta(self):
        """
        Ensure that a job scheduled via enqueue_at can be created with a custom meta.
        """
        meta = {'say': 'hello'}
        job = self.scheduler.enqueue_at(datetime.utcnow(),
                                        say_hello,
                                        meta=meta)
        self.assertEqual(job.meta, meta)

    def test_enqueue_in(self):
        """
        Ensure that jobs have the right scheduled time.
        """
        right_now = datetime.utcnow()
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            to_unix(right_now + time_delta))
        time_delta = timedelta(hours=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            to_unix(right_now + time_delta))

    def test_enqueue_in_sets_timeout(self):
        """
        Ensure that a job scheduled via enqueue_in can be created with
        a custom timeout.
        """
        timeout = 13
        job = self.scheduler.enqueue_in(timedelta(minutes=1),
                                        say_hello,
                                        timeout=timeout)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.timeout, timeout)

    def test_enqueue_in_sets_job_id(self):
        """
        Ensure that a job scheduled via enqueue_in can be created with
        a custom job id.
        """
        job_id = 'test_id'
        job = self.scheduler.enqueue_in(timedelta(minutes=1),
                                        say_hello,
                                        job_id=job_id)
        self.assertEqual(job.id, job_id)

    def test_enqueue_in_sets_job_ttl(self):
        """
        Ensure that a job scheduled via enqueue_in can be created with a custom job ttl.
        """
        job_ttl = 123456789
        job = self.scheduler.enqueue_in(timedelta(minutes=1),
                                        say_hello,
                                        job_ttl=job_ttl)
        self.assertEqual(job.ttl, job_ttl)

    def test_enqueue_in_sets_job_result_ttl(self):
        """
        Ensure that a job scheduled via enqueue_in can be created with a custom result ttl.
        """
        job_result_ttl = 1234567890
        job = self.scheduler.enqueue_in(timedelta(minutes=1),
                                        say_hello,
                                        job_result_ttl=job_result_ttl)
        self.assertEqual(job.result_ttl, job_result_ttl)

    def test_enqueue_in_sets_meta(self):
        """
        Ensure that a job scheduled via enqueue_in sets meta.
        """
        meta = {'say': 'hello'}
        job = self.scheduler.enqueue_in(timedelta(minutes=1),
                                        say_hello,
                                        meta=meta)
        self.assertEqual(job.meta, meta)

    def test_count(self):
        now = datetime.utcnow()
        self.scheduler.enqueue_at(now, say_hello)
        self.assertEqual(self.scheduler.count(), 1)

        future_time = now + timedelta(hours=1)
        future_test_time = now + timedelta(minutes=59, seconds=59)

        self.scheduler.enqueue_at(future_time, say_hello)

        self.assertEqual(
            self.scheduler.count(timedelta(minutes=59, seconds=59)), 1)
        self.assertEqual(self.scheduler.count(future_test_time), 1)
        self.assertEqual(self.scheduler.count(), 2)

    def test_get_jobs(self):
        """
        Ensure get_jobs() returns all jobs until the specified time.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs(now))
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertIn(job,
                      self.scheduler.get_jobs(timedelta(hours=1, seconds=1)))
        self.assertIn(job,
                      [j[0] for j in self.scheduler.get_jobs(with_times=True)])
        self.assertIsInstance(
            list(self.scheduler.get_jobs(with_times=True))[0][1], datetime)
        self.assertNotIn(
            job, self.scheduler.get_jobs(timedelta(minutes=59, seconds=59)))

    def test_get_jobs_slice(self):
        """
        Ensure get_jobs() returns the appropriate slice of all jobs using offset and length.
        """
        now = datetime.utcnow()
        future_time = now + timedelta(hours=1)
        future_test_time = now + timedelta(minutes=59, seconds=59)

        # Schedule each job a second later than the previous job,
        # otherwise Redis will return jobs that have the same scheduled time in
        # lexicographical order (not the order in which we enqueued them)
        now_jobs = [
            self.scheduler.enqueue_at(now + timedelta(seconds=x), say_hello)
            for x in range(15)
        ]
        future_jobs = [
            self.scheduler.enqueue_at(future_time + timedelta(seconds=x),
                                      say_hello) for x in range(15)
        ]

        # last 10 from now_jobs and first 10 from future_jobs
        expected_slice = now_jobs[5:] + future_jobs[:10]
        expected_until_slice = now_jobs[5:]  # last 10 from now_jobs

        jobs = self.scheduler.get_jobs()
        jobs_slice = self.scheduler.get_jobs(offset=5, length=20)
        jobs_until_slice = self.scheduler.get_jobs(future_test_time,
                                                   offset=5,
                                                   length=20)

        self.assertEqual(now_jobs + future_jobs, list(jobs))
        self.assertEqual(expected_slice, list(jobs_slice))
        self.assertEqual(expected_until_slice, list(jobs_until_slice))

    def test_get_jobs_to_queue(self):
        """
        Ensure that jobs scheduled in the future are not queued.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertNotIn(job, self.scheduler.get_jobs_to_queue())

    def test_enqueue_job(self):
        """
        When scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        - Queue is recognized by rq's Queue.all()
        """
        now = datetime.utcnow()
        queue_name = 'foo'
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)

        job = scheduler.enqueue_at(now, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(
            job, tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10)))
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)
        self.assertIn(queue, Queue.all())

    def test_enqueue_job_with_scheduler_queue(self):
        """
        Ensure that job is enqueued correctly when the scheduler is bound
        to a queue object and job queue name is not provided.
        """
        queue = Queue('foo', connection=self.testconn)
        scheduler = Scheduler(connection=self.testconn, queue=queue)
        job = scheduler._create_job(say_hello)
        scheduler_queue = scheduler.get_queue_for_job(job)
        self.assertEqual(queue, scheduler_queue)
        scheduler.enqueue_job(job)
        self.assertTrue(job.enqueued_at is not None)
        self.assertIn(job, queue.jobs)
        self.assertIn(queue, Queue.all())

    def test_enqueue_job_with_job_queue_name(self):
        """
        Ensure that job is enqueued correctly when queue_name is provided
        at job creation
        """
        queue = Queue('foo', connection=self.testconn)
        job_queue = Queue('job_foo', connection=self.testconn)
        scheduler = Scheduler(connection=self.testconn, queue=queue)
        job = scheduler._create_job(say_hello, queue_name='job_foo')
        self.assertEqual(scheduler.get_queue_for_job(job), job_queue)
        scheduler.enqueue_job(job)
        self.assertTrue(job.enqueued_at is not None)
        self.assertIn(job, job_queue.jobs)
        self.assertIn(job_queue, Queue.all())

    def test_enqueue_at_with_job_queue_name(self):
        """
        Ensure that job is enqueued correctly when queue_name is provided
        to enqueue_at
        """
        queue = Queue('foo', connection=self.testconn)
        job_queue = Queue('job_foo', connection=self.testconn)
        scheduler = Scheduler(connection=self.testconn, queue=queue)
        job = scheduler.enqueue_at(datetime.utcnow(),
                                   say_hello,
                                   queue_name='job_foo')
        self.assertEqual(scheduler.get_queue_for_job(job), job_queue)
        self.scheduler.enqueue_job(job)
        self.assertTrue(job.enqueued_at is not None)
        self.assertIn(job, job_queue.jobs)
        self.assertIn(job_queue, Queue.all())

    def test_job_membership(self):
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler)
        self.assertIn(job.id, self.scheduler)
        self.assertNotIn("non-existing-job-id", self.scheduler)

    def test_cancel_scheduled_job(self):
        """
        When scheduled job is canceled, make sure:
        - Job is removed from the sorted set of scheduled jobs
        """
        # schedule a job to be enqueued one minute from now
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        # cancel the scheduled job and check that it's gone from the set
        self.scheduler.cancel(job)
        self.assertNotIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_change_execution_time(self):
        """
        Ensure ``change_execution_time`` updates the job's score and raises
        ValueError once the job has been cancelled.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
        new_date = datetime(2010, 1, 1)
        self.scheduler.change_execution_time(job, new_date)
        self.assertEqual(
            to_unix(new_date),
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))
        self.scheduler.cancel(job)
        self.assertRaises(ValueError, self.scheduler.change_execution_time,
                          job, new_date)

    def test_args_kwargs_are_passed_correctly(self):
        """
        Ensure that arguments and keyword arguments are properly saved to jobs.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1,
                                        1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_at(datetime.utcnow(),
                                        simple_addition,
                                        z=1,
                                        y=1,
                                        x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_at(datetime.utcnow(),
                                        simple_addition,
                                        1,
                                        z=1,
                                        y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1, ))

        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_in(time_delta,
                                        simple_addition,
                                        z=1,
                                        y=1,
                                        x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_in(time_delta,
                                        simple_addition,
                                        1,
                                        z=1,
                                        y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1, ))

    def test_interval_and_repeat_persisted_correctly(self):
        """
        Ensure that interval and repeat attributes are correctly saved.
        """
        job = self.scheduler.schedule(datetime.utcnow(),
                                      say_hello,
                                      interval=10,
                                      repeat=11)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['interval'], 10)
        self.assertEqual(job_from_queue.meta['repeat'], 11)

    def test_crontab_persisted_correctly(self):
        """
        Ensure that crontab attribute gets correctly saved in Redis.
        """
        # create a job that runs one minute past each whole hour
        job = self.scheduler.cron("1 * * * *", say_hello)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['cron_string'], "1 * * * *")

        # get the scheduled_time and convert it to a datetime object
        unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key,
                                         job.id)
        datetime_time = from_unix(unix_time)

        # check that minute=1, seconds=0, and is within an hour
        assert datetime_time.minute == 1
        assert datetime_time.second == 0
        assert datetime_time - datetime.utcnow() < timedelta(hours=1)

    def test_crontab_persisted_correctly_with_local_timezone(self):
        """
        Ensure that crontab attribute gets correctly saved in Redis when using local TZ.
        """
        # create a job that runs at 15:00 local time every day
        job = self.scheduler.cron("0 15 * * *",
                                  say_hello,
                                  use_local_timezone=True)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['cron_string'], "0 15 * * *")

        # get the scheduled_time and convert it to a datetime object
        unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key,
                                         job.id)
        datetime_time = from_unix(unix_time)

        expected_datetime_in_local_tz = datetime.now(
            get_utc_timezone()).replace(hour=15,
                                        minute=0,
                                        second=0,
                                        microsecond=0)
        assert (datetime_time.time() ==
                expected_datetime_in_local_tz.astimezone(get_utc_timezone()).time())

    def test_crontab_rescheduled_correctly_with_local_timezone(self):
        # Create a job with a cronjob_string
        job = self.scheduler.cron("1 15 * * *",
                                  say_hello,
                                  use_local_timezone=True)

        # change crontab
        job.meta['cron_string'] = "2 15 * * *"

        # reenqueue the job
        self.scheduler.enqueue_job(job)

        # get the scheduled_time and convert it to a datetime object
        unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key,
                                         job.id)
        datetime_time = from_unix(unix_time)

        expected_datetime_in_local_tz = datetime.now(
            get_utc_timezone()).replace(hour=15,
                                        minute=2,
                                        second=0,
                                        microsecond=0)
        assert (datetime_time.time() ==
                expected_datetime_in_local_tz.astimezone(get_utc_timezone()).time())

    def test_crontab_sets_timeout(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom timeout.
        """
        timeout = 13
        job = self.scheduler.cron("1 * * * *", say_hello, timeout=timeout)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.timeout, timeout)

    def test_crontab_sets_id(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom id
        """
        job_id = "hello-job-id"
        job = self.scheduler.cron("1 * * * *", say_hello, id=job_id)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_id, job_from_queue.id)

    def test_crontab_sets_default_result_ttl(self):
        """
        Ensure that a job scheduled via crontab gets proper default
        result_ttl (-1) for periodic tasks.
        """
        job = self.scheduler.cron("1 * * * *", say_hello)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(-1, job_from_queue.result_ttl)

    def test_crontab_sets_description(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom description
        """
        description = 'test description'
        job = self.scheduler.cron("1 * * * *",
                                  say_hello,
                                  description=description)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(description, job_from_queue.description)

    def test_repeat_without_interval_raises_error(self):
        # Ensure that an error is raised if repeat is specified without interval
        def create_job():
            self.scheduler.schedule(datetime.utcnow(), say_hello, repeat=11)

        self.assertRaises(ValueError, create_job)

    def test_job_with_intervals_get_rescheduled(self):
        """
        Ensure jobs with interval attribute are put back in the scheduler
        """
        time_now = datetime.utcnow()
        interval = 10
        job = self.scheduler.schedule(time_now, say_hello, interval=interval)
        self.scheduler.enqueue_job(job)
        self.assertIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            to_unix(time_now) + interval)

    def test_job_with_interval_can_set_meta(self):
        """
        Ensure that jobs with interval attribute can be created with meta
        """
        time_now = datetime.utcnow()
        interval = 10
        meta = {'say': 'hello'}
        job = self.scheduler.schedule(time_now,
                                      say_hello,
                                      interval=interval,
                                      meta=meta)
        self.scheduler.enqueue_job(job)
        self.assertEqual(job.meta, meta)

    def test_job_with_crontab_get_rescheduled(self):
        # Create a job with a cronjob_string
        job = self.scheduler.cron("1 * * * *", say_hello)

        # current unix_time
        old_next_scheduled_time = self.testconn.zscore(
            self.scheduler.scheduled_jobs_key, job.id)

        # change crontab
        job.meta['cron_string'] = "2 * * * *"

        # enqueue the job
        self.scheduler.enqueue_job(job)

        self.assertIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # check that next scheduled time has changed
        self.assertNotEqual(
            old_next_scheduled_time,
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))

        # check that new next scheduled time is set correctly
        expected_next_scheduled_time = to_unix(
            get_next_scheduled_time("2 * * * *"))
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            expected_next_scheduled_time)

    def test_job_with_repeat(self):
        """
        Ensure jobs with repeat attribute are put back in the scheduler
        X (repeat) number of times
        """
        time_now = datetime.utcnow()
        interval = 10
        # If job is repeated once, the job shouldn't be put back in the queue
        job = self.scheduler.schedule(time_now,
                                      say_hello,
                                      interval=interval,
                                      repeat=1)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.schedule(time_now,
                                      say_hello,
                                      interval=interval,
                                      repeat=2)
        self.scheduler.enqueue_job(job)
        self.assertIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_missing_jobs_removed_from_scheduler(self):
        """
        Ensure jobs that don't exist when queued are removed from the scheduler.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello)
        job.cancel()
        list(self.scheduler.get_jobs_to_queue())
        self.assertIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        job.delete()
        list(self.scheduler.get_jobs_to_queue())
        self.assertNotIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_periodic_jobs_sets_result_ttl(self):
        """
        Ensure periodic jobs set result_ttl to -1 (results kept indefinitely).
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.result_ttl, -1)

    def test_periodic_jobs_sets_ttl(self):
        """
        Ensure periodic jobs set ttl correctly.
        """
        job = self.scheduler.schedule(datetime.utcnow(),
                                      say_hello,
                                      interval=5,
                                      ttl=4)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.ttl, 4)

    def test_periodic_jobs_sets_meta(self):
        """
        Ensure periodic jobs set meta correctly.
        """
        meta = {'say': 'hello'}
        job = self.scheduler.schedule(datetime.utcnow(),
                                      say_hello,
                                      interval=5,
                                      meta=meta)
        self.assertEqual(meta, job.meta)

    def test_periodic_job_sets_id(self):
        """
        Ensure that ID is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(),
                                      say_hello,
                                      interval=5,
                                      id='id test')
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('id test', job.id)

    def test_periodic_job_sets_description(self):
        """
        Ensure that description is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(),
                                      say_hello,
                                      interval=5,
                                      description='description')
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('description', job.description)

    def test_run(self):
        """
        Check correct signal handling in Scheduler.run().
        """
        def send_stop_signal():
            """
            Sleep for 1 second, then send an INT signal to ourselves, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)

        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, self.scheduler.run)
        thread.join()

    def test_run_burst(self):
        """
        Check burst mode of Scheduler.run().
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        self.assertEqual(len(list(self.scheduler.get_jobs())), 1)
        self.scheduler.run(burst=True)
        self.assertEqual(len(list(self.scheduler.get_jobs())), 0)

    def test_scheduler_w_o_explicit_connection(self):
        """
        Ensure instantiating Scheduler w/o explicit connection works.
        """
        s = Scheduler()
        self.assertEqual(s.connection, self.testconn)

    def test_small_float_interval(self):
        """
        Test that scheduler accepts 'interval' of type float, less than 1 second.
        """
        key = Scheduler.scheduler_key
        lock_key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn,
                              interval=0.1)  # testing interval = 0.1 second
        self.assertEqual(scheduler._interval, 0.1)

        #acquire lock
        self.assertTrue(scheduler.acquire_lock())
        self.assertIn(lock_key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(lock_key), 10)  # int(0.1) + 10 = 10

        #enqueue a job
        now = datetime.utcnow()
        job = scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        self.assertEqual(len(list(self.scheduler.get_jobs())), 1)

        #remove the lock
        scheduler.remove_lock()

        #test that run works with the small floating-point interval
        def send_stop_signal():
            """
            Sleep for 1 second, then send an INT signal to ourselves, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)

        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, scheduler.run)
        thread.join()

        #all jobs must have been scheduled during 1 second
        self.assertEqual(len(list(scheduler.get_jobs())), 0)

    def test_get_queue_for_job_with_job_queue_name(self):
        """
        Tests that scheduler gets the correct queue for the job when
        queue_name is provided.
        """
        queue = Queue('scheduler_foo', connection=self.testconn)
        job_queue = Queue('job_foo', connection=self.testconn)
        scheduler = Scheduler(connection=self.testconn, queue=queue)
        job = scheduler._create_job(say_hello, queue_name='job_foo')
        self.assertEqual(scheduler.get_queue_for_job(job), job_queue)

    def test_get_queue_for_job_without_job_queue_name(self):
        """
        Tests that scheduler gets the scheduler queue for the job
        when queue name is not provided for that job.
        """
        queue = Queue('scheduler_foo', connection=self.testconn)
        scheduler = Scheduler(connection=self.testconn, queue=queue)
        job = scheduler._create_job(say_hello)
        self.assertEqual(scheduler.get_queue_for_job(job), queue)
Exemple #30
0
def stop_fetching():
    scheduler = Scheduler(connection=get_connection())
    for j in scheduler.get_jobs():
        scheduler.cancel(j)
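
A narrower variant of the snippet above is sometimes useful when only one task should be unscheduled. Since Scheduler.cancel() just removes the entry for a job, the loop can filter on rq's Job.func_name (the dotted path of the scheduled callable). This is a minimal sketch, not part of the original module; get_connection() is the same helper assumed above and 'myapp.tasks.fetch' is a placeholder name:

from rq_scheduler import Scheduler

def stop_fetching_one(func_name='myapp.tasks.fetch'):
    # Cancel only the scheduled jobs whose callable matches func_name.
    scheduler = Scheduler(connection=get_connection())
    for j in scheduler.get_jobs():
        if j.func_name == func_name:
            scheduler.cancel(j)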
settings = Settings()

# launch scheduled jobs
import datetime
from brokenpromises.worker     import worker
from rq_scheduler              import Scheduler
from brokenpromises.operations import CollectNext7days, CollectNext2Months, CollectNext2Years, CollectToday, MrClean
import redis
conn           = redis.from_url(settings.REDIS_URL)
scheduler      = Scheduler(connection=conn)
scheduled_jobs = scheduler.get_jobs()
# remove all jobs with interval
for job in scheduled_jobs:
	if "RunAndReplaceIntTheQueuePeriodically" in job.description:
		scheduler.cancel(job)

today = datetime.datetime.now()
# next midnight
next_midnight = today + datetime.timedelta(days=1)
next_midnight = datetime.datetime(next_midnight.year, next_midnight.month, next_midnight.day, 0, 10)
# next month
# roll the year over only when the current month is December
year          = today.year + today.month // 12
month         = today.month % 12 + 1
next_month    = datetime.datetime(year, month, 1, 0, 10)
# next new year
next_year     = datetime.datetime(today.year + 1, 1, 1, 0, 20)

# enqueue periodic jobs
worker.schedule_periodically(date=next_midnight, frequence="daily"  , collector=CollectToday())
worker.schedule_periodically(date=next_midnight, frequence="daily"  , collector=CollectNext7days())
Exemple #32
0
# @desc:

from redis import Redis
from rq import Queue
from rq_scheduler import Scheduler
from datetime import datetime, timedelta

from rqtest.rq_test import redis_conn
from rqtest.worker import task2, task4, task3, task1


class A(object):
    def __init__(self):
        self.name = 'aniu'


scheduler = Scheduler(connection=redis_conn)
scheduler1 = Scheduler(queue_name='low', connection=redis_conn)
scheduler2 = Scheduler(queue_name='high', connection=redis_conn)
a = A()

if __name__ == "__main__":
    print('a = ', a.name)
    scheduler1.enqueue_in(timedelta(seconds=5), task1, a)
    scheduler2.enqueue_in(timedelta(seconds=10), task3, 'hello, third uncle')
    scheduler.enqueue_in(timedelta(seconds=15), task4, 'hello, fourth uncle')
    if 0:
        for job in scheduler2.get_jobs():
            scheduler2.cancel(job)
            print('scheduler2 cancelled job: {0!r}'.format(job))
Exemple #33
0
class TestScheduler(RQTestCase):

    def setUp(self):
        super(TestScheduler, self).setUp()
        self.scheduler = Scheduler(connection=self.testconn)

    def test_acquire_lock(self):
        """
        When scheduler acquires a lock, besides creating a key, it should
        also set an expiry that's a few seconds longer than its polling
        interval, so it automatically expires if the scheduler is unexpectedly
        terminated.
        """
        key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=20)
        self.assertTrue(scheduler.acquire_lock())
        self.assertIn(key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(key), 30)
        scheduler.remove_lock()
        self.assertNotIn(key, tl(self.testconn.keys('*')))

    def test_no_two_schedulers_acquire_lock(self):
        """
        Ensure that no two schedulers can acquire the lock at the
        same time. When removing the lock, only the scheduler which
        originally acquired the lock can remove the lock.
        """
        key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler1 = Scheduler(connection=self.testconn, interval=20)
        scheduler2 = Scheduler(connection=self.testconn, interval=20)
        self.assertTrue(scheduler1.acquire_lock())
        self.assertFalse(scheduler2.acquire_lock())
        self.assertIn(key, tl(self.testconn.keys('*')))
        scheduler2.remove_lock()
        self.assertIn(key, tl(self.testconn.keys('*')))
        scheduler1.remove_lock()
        self.assertNotIn(key, tl(self.testconn.keys('*')))

    def test_create_job(self):
        """
        Ensure that jobs are created properly.
        """
        job = self.scheduler._create_job(say_hello, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job, job_from_queue)
        self.assertEqual(job_from_queue.func, say_hello)

    def test_create_job_with_ttl(self):
        """
        Ensure that TTL is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, ttl=2, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(2, job_from_queue.ttl)

    def test_create_job_with_id(self):
        """
        Ensure that ID is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, id='id test', args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('id test', job_from_queue.id)

    def test_create_job_with_description(self):
        """
        Ensure that description is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, description='description', args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('description', job_from_queue.description)

    def test_create_job_with_timeout(self):
        """
        Ensure that timeout is passed to RQ.
        """
        timeout = 13
        job = self.scheduler._create_job(say_hello, timeout=13, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(timeout, job_from_queue.timeout)

    def test_job_not_persisted_if_commit_false(self):
        """
        Ensure jobs are only saved to Redis if commit=True.
        """
        job = self.scheduler._create_job(say_hello, commit=False)
        self.assertEqual(self.testconn.hgetall(job.key), {})

    def test_create_scheduled_job(self):
        """
        Ensure that scheduled jobs are put in the scheduler queue with the right score
        """
        scheduled_time = datetime.utcnow()
        job = self.scheduler.enqueue_at(scheduled_time, say_hello)
        self.assertEqual(job, Job.fetch(job.id, connection=self.testconn))
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(scheduled_time))

    def test_create_job_with_meta(self):
        """
        Ensure that meta information on the job is passed to rq
        """
        expected = {'say': 'hello'}
        job = self.scheduler._create_job(say_hello, meta=expected)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(expected, job_from_queue.meta)

    def test_enqueue_at_sets_timeout(self):
        """
        Ensure that a job scheduled via enqueue_at can be created with
        a custom timeout.
        """
        timeout = 13
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, timeout=timeout)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.timeout, timeout)

    def test_enqueue_at_sets_job_id(self):
        """
        Ensure that a job scheduled via enqueue_at can be created with
        a custom job id.
        """
        job_id = 'test_id'
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, job_id=job_id)
        self.assertEqual(job.id, job_id)

    def test_enqueue_at_sets_job_ttl(self):
        """
        Ensure that a job scheduled via enqueue_at can be created with a custom job ttl.
        """
        job_ttl = 123456789
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, job_ttl=job_ttl)
        self.assertEqual(job.ttl, job_ttl)

    def test_enqueue_at_sets_job_result_ttl(self):
        """
        Ensure that a job scheduled via enqueue_at can be created with a custom result ttl.
        """
        job_result_ttl = 1234567890
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, job_result_ttl=job_result_ttl)
        self.assertEqual(job.result_ttl, job_result_ttl)

    def test_enqueue_at_sets_meta(self):
        """
        Ensure that a job scheduled via enqueue_at can be created with a custom meta.
        """
        meta = {'say': 'hello'}
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, meta=meta)
        self.assertEqual(job.meta, meta)

    def test_enqueue_in(self):
        """
        Ensure that jobs have the right scheduled time.
        """
        right_now = datetime.utcnow()
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertIn(job.id,
                      tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))
        time_delta = timedelta(hours=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))

    def test_enqueue_in_sets_timeout(self):
        """
        Ensure that a job scheduled via enqueue_in can be created with
        a custom timeout.
        """
        timeout = 13
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, timeout=timeout)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.timeout, timeout)

    def test_enqueue_in_sets_job_id(self):
        """
        Ensure that a job scheduled via enqueue_in can be created with
        a custom job id.
        """
        job_id = 'test_id'
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, job_id=job_id)
        self.assertEqual(job.id, job_id)

    def test_enqueue_in_sets_job_ttl(self):
        """
        Ensure that a job scheduled via enqueue_in can be created with a custom job ttl.
        """
        job_ttl = 123456789
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, job_ttl=job_ttl)
        self.assertEqual(job.ttl, job_ttl)

    def test_enqueue_in_sets_job_result_ttl(self):
        """
        Ensure that a job scheduled via enqueue_in can be created with a custom result ttl.
        """
        job_result_ttl = 1234567890
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, job_result_ttl=job_result_ttl)
        self.assertEqual(job.result_ttl, job_result_ttl)

    def test_enqueue_in_sets_meta(self):
        """
        Ensure that a job scheduled via enqueue_in sets meta.
        """
        meta = {'say': 'hello'}
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, meta=meta)
        self.assertEqual(job.meta, meta)

    def test_count(self):
        now = datetime.utcnow()
        self.scheduler.enqueue_at(now, say_hello)
        self.assertEqual(self.scheduler.count(), 1)

        future_time = now + timedelta(hours=1)
        future_test_time = now + timedelta(minutes=59, seconds=59)

        self.scheduler.enqueue_at(future_time, say_hello)

        self.assertEqual(self.scheduler.count(timedelta(minutes=59, seconds=59)), 1)
        self.assertEqual(self.scheduler.count(future_test_time), 1)
        self.assertEqual(self.scheduler.count(), 2)

    def test_get_jobs(self):
        """
        Ensure get_jobs() returns all jobs until the specified time.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs(now))
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertIn(job, self.scheduler.get_jobs(timedelta(hours=1, seconds=1)))
        self.assertIn(job, [j[0] for j in self.scheduler.get_jobs(with_times=True)])
        self.assertIsInstance(list(self.scheduler.get_jobs(with_times=True))[0][1], datetime)
        self.assertNotIn(job, self.scheduler.get_jobs(timedelta(minutes=59, seconds=59)))

    def test_get_jobs_slice(self):
        """
        Ensure get_jobs() returns the appropriate slice of all jobs using offset and length.
        """
        now = datetime.utcnow()
        future_time = now + timedelta(hours=1)
        future_test_time = now + timedelta(minutes=59, seconds=59)

        # Schedule each job a second later than the previous job,
        # otherwise Redis will return jobs that have the same scheduled time in
        # lexicographical order (not the order in which we enqueued them)
        now_jobs = [self.scheduler.enqueue_at(now + timedelta(seconds=x), say_hello)
                    for x in range(15)]
        future_jobs = [self.scheduler.enqueue_at(future_time + timedelta(seconds=x), say_hello)
                       for x in range(15)]

        expected_slice = now_jobs[5:] + future_jobs[:10]   # last 10 from now_jobs and first 10 from future_jobs
        expected_until_slice = now_jobs[5:]                # last 10 from now_jobs

        jobs = self.scheduler.get_jobs()
        jobs_slice = self.scheduler.get_jobs(offset=5, length=20)
        jobs_until_slice = self.scheduler.get_jobs(future_test_time, offset=5, length=20)

        self.assertEqual(now_jobs + future_jobs, list(jobs))
        self.assertEqual(expected_slice, list(jobs_slice))
        self.assertEqual(expected_until_slice, list(jobs_until_slice))

    def test_get_jobs_to_queue(self):
        """
        Ensure that jobs scheduled in the future are not queued.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertNotIn(job, self.scheduler.get_jobs_to_queue())

    def test_enqueue_job(self):
        """
        When scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        - Queue is recognized by rq's Queue.all()
        """
        now = datetime.utcnow()
        queue_name = 'foo'
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)

        job = scheduler.enqueue_at(now, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job, tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10)))
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)
        self.assertIn(queue, Queue.all())

    def test_enqueue_job_with_queue(self):
        """
        Ensure that job is enqueued correctly when the scheduler is bound
        to a queue object.
        """
        queue = Queue('foo', connection=self.testconn)
        scheduler = Scheduler(connection=self.testconn, queue=queue)
        job = scheduler._create_job(say_hello)
        scheduler_queue = scheduler.get_queue_for_job(job)
        self.assertEqual(queue, scheduler_queue)
        scheduler.enqueue_job(job)
        self.assertTrue(job.enqueued_at is not None)
        self.assertIn(job, queue.jobs)
        self.assertIn(queue, Queue.all())

    def test_job_membership(self):
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler)
        self.assertIn(job.id, self.scheduler)
        self.assertNotIn("non-existing-job-id", self.scheduler)

    def test_cancel_scheduled_job(self):
        """
        When scheduled job is canceled, make sure:
        - Job is removed from the sorted set of scheduled jobs
        """
        # schedule a job to be enqueued one minute from now
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        # cancel the scheduled job and check that it's gone from the set
        self.scheduler.cancel(job)
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_change_execution_time(self):
        """
        Ensure ``change_execution_time`` updates the job's score and raises
        ValueError once the job has been cancelled.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
        new_date = datetime(2010, 1, 1)
        self.scheduler.change_execution_time(job, new_date)
        self.assertEqual(to_unix(new_date),
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))
        self.scheduler.cancel(job)
        self.assertRaises(ValueError, self.scheduler.change_execution_time, job, new_date)

    def test_args_kwargs_are_passed_correctly(self):
        """
        Ensure that arguments and keyword arguments are properly saved to jobs.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_in(time_delta, simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

    def test_interval_and_repeat_persisted_correctly(self):
        """
        Ensure that interval and repeat attributes are correctly saved.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=10, repeat=11)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['interval'], 10)
        self.assertEqual(job_from_queue.meta['repeat'], 11)

    def test_crontab_persisted_correctly(self):
        """
        Ensure that crontab attribute gets correctly saved in Redis.
        """
        # create a job that runs one minute past each whole hour
        job = self.scheduler.cron("1 * * * *", say_hello)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['cron_string'], "1 * * * *")

        # get the scheduled_time and convert it to a datetime object
        unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)
        datetime_time = from_unix(unix_time)

        # check that minute=1, seconds=0, and is within an hour
        assert datetime_time.minute == 1
        assert datetime_time.second == 0
        assert datetime_time - datetime.utcnow() < timedelta(hours=1)

    def test_crontab_sets_timeout(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom timeout.
        """
        timeout = 13
        job = self.scheduler.cron("1 * * * *", say_hello, timeout=timeout)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.timeout, timeout)

    def test_crontab_sets_id(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom id
        """
        job_id = "hello-job-id"
        job = self.scheduler.cron("1 * * * *", say_hello, id=job_id)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_id, job_from_queue.id)

    def test_crontab_sets_default_result_ttl(self):
        """
        Ensure that a job scheduled via crontab gets proper default
        result_ttl (-1) for periodic tasks.
        """
        job = self.scheduler.cron("1 * * * *", say_hello)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(-1, job_from_queue.result_ttl)

    def test_crontab_sets_description(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom description
        """
        description = 'test description'
        job = self.scheduler.cron("1 * * * *", say_hello, description=description)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(description, job_from_queue.description)

    def test_repeat_without_interval_raises_error(self):
        # Ensure that an error is raised if repeat is specified without interval
        def create_job():
            self.scheduler.schedule(datetime.utcnow(), say_hello, repeat=11)
        self.assertRaises(ValueError, create_job)

    def test_job_with_intervals_get_rescheduled(self):
        """
        Ensure jobs with interval attribute are put back in the scheduler
        """
        time_now = datetime.utcnow()
        interval = 10
        job = self.scheduler.schedule(time_now, say_hello, interval=interval)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(time_now) + interval)

    def test_job_with_interval_can_set_meta(self):
        """
        Ensure that jobs with interval attribute can be created with meta
        """
        time_now = datetime.utcnow()
        interval = 10
        meta = {'say': 'hello'}
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, meta=meta)
        self.scheduler.enqueue_job(job)
        self.assertEqual(job.meta, meta)

    def test_job_with_crontab_get_rescheduled(self):
        # Create a job with a cronjob_string
        job = self.scheduler.cron("1 * * * *", say_hello)

        # current unix_time
        old_next_scheduled_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)

        # change crontab
        job.meta['cron_string'] = "2 * * * *"

        # enqueue the job
        self.scheduler.enqueue_job(job)

        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # check that next scheduled time has changed
        self.assertNotEqual(old_next_scheduled_time,
                            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))

        # check that new next scheduled time is set correctly
        expected_next_scheduled_time = to_unix(get_next_scheduled_time("2 * * * *"))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         expected_next_scheduled_time)

    def test_job_with_repeat(self):
        """
        Ensure jobs with repeat attribute are put back in the scheduler
        X (repeat) number of times
        """
        time_now = datetime.utcnow()
        interval = 10
        # If job is repeated once, the job shouldn't be put back in the queue
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=1)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=2)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_missing_jobs_removed_from_scheduler(self):
        """
        Ensure jobs that don't exist when queued are removed from the scheduler.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello)
        job.cancel()
        list(self.scheduler.get_jobs_to_queue())
        self.assertIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))
        job.delete()
        list(self.scheduler.get_jobs_to_queue())
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_periodic_jobs_sets_result_ttl(self):
        """
        Ensure periodic jobs set result_ttl to -1 (results kept indefinitely).
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.result_ttl, -1)

    def test_periodic_jobs_sets_ttl(self):
        """
        Ensure periodic jobs set ttl correctly.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, ttl=4)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.ttl, 4)

    def test_periodic_jobs_sets_meta(self):
        """
        Ensure periodic jobs set meta correctly.
        """
        meta = {'say': 'hello'}
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, meta=meta)
        self.assertEqual(meta, job.meta)

    def test_periodic_job_sets_id(self):
        """
        Ensure that ID is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, id='id test')
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('id test', job.id)

    def test_periodic_job_sets_description(self):
        """
        Ensure that description is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, description='description')
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('description', job.description)

    def test_run(self):
        """
        Check correct signal handling in Scheduler.run().
        """
        def send_stop_signal():
            """
            Sleep for 1 second, then send an INT signal to ourselves, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)
        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, self.scheduler.run)
        thread.join()

    def test_run_burst(self):
        """
        Check burst mode of Scheduler.run().
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        self.assertEqual(len(list(self.scheduler.get_jobs())), 1)
        self.scheduler.run(burst=True)
        self.assertEqual(len(list(self.scheduler.get_jobs())), 0)

    def test_scheduler_w_o_explicit_connection(self):
        """
        Ensure instantiating Scheduler w/o explicit connection works.
        """
        s = Scheduler()
        self.assertEqual(s.connection, self.testconn)

    def test_small_float_interval(self):
        """
        Test that scheduler accepts 'interval' of type float, less than 1 second.
        """
        key = Scheduler.scheduler_key
        lock_key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=0.1)   # testing interval = 0.1 second
        self.assertEqual(scheduler._interval, 0.1)

        #acquire lock
        self.assertTrue(scheduler.acquire_lock())
        self.assertIn(lock_key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(lock_key), 10)  # int(0.1) + 10 = 10

        #enqueue a job
        now = datetime.utcnow()
        job = scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        self.assertEqual(len(list(self.scheduler.get_jobs())), 1)

        #remove the lock
        scheduler.remove_lock()

        #test that run works with the small floating-point interval
        def send_stop_signal():
            """
            Sleep for 1 second, then send an INT signal to ourselves, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)
        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, scheduler.run)
        thread.join()

        #all jobs must have been scheduled during 1 second
        self.assertEqual(len(list(scheduler.get_jobs())), 0)
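
Outside of the test suite, the two modes exercised by test_run and test_run_burst above correspond to a very small scheduler process. A minimal sketch, with the connection and interval chosen arbitrarily:

from redis import Redis
from rq_scheduler import Scheduler

# Poll the set of scheduled jobs every 30 seconds until the process is interrupted.
scheduler = Scheduler(connection=Redis(), interval=30)
scheduler.run()  # blocking loop; run(burst=True) enqueues due jobs once and returns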
Exemple #34
0
def cancel(self):
    logger.info('cancelling future Notification pk {}'.format(self.pk))
    scheduler = Scheduler(connection=Redis())
    scheduler.cancel(self.job_id)
    self.canceled = True
    self.save()
Exemple #35
0
def cancel_job_view(job_id):
    scheduler = Scheduler()
    scheduler.cancel(job_id)
    return dict(status='OK')
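
As Exemples #34 and #35 (job ids) and the test suite above (Job instances) show, Scheduler.cancel() accepts either form; both simply remove the job's entry from the scheduler's sorted set of scheduled jobs. A short sketch, with 'some-job-id' as a placeholder for an existing scheduled job:

from redis import Redis
from rq.job import Job
from rq_scheduler import Scheduler

scheduler = Scheduler(connection=Redis())
scheduler.cancel('some-job-id')  # by id, as in the view above
scheduler.cancel(Job.fetch('some-job-id', connection=scheduler.connection))  # by Job instance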
Exemple #36
0
import os
import redis

# from rq import Queue
from rq_scheduler import Scheduler
from datetime import datetime
from app import loop_script

redis_url = os.getenv('REDISCLOUD_URL', 'redis://localhost:6379')
conn = redis.from_url(redis_url)

# q = Queue(connection=conn)
# result = q.enqueue(loop_script)

scheduler = Scheduler(connection=conn)

# cancel any previously scheduled copies so the script is not queued twice
for job in scheduler.get_jobs():
    scheduler.cancel(job)

scheduler.schedule(scheduled_time=datetime.utcnow(),
                   func=loop_script,
                   interval=30)

print(list(scheduler.get_jobs()))
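An alternative to the cancel-then-reschedule loop above is to give the periodic job a fixed id, which makes the startup script idempotent. This sketch reuses the scheduler and loop_script names defined in the snippet above and assumes the id= parameter and job-id membership check exercised by the tests later in this listing:

JOB_ID = 'loop-script'  # hypothetical fixed id

if JOB_ID not in scheduler:
    scheduler.schedule(scheduled_time=datetime.utcnow(),
                       func=loop_script,
                       interval=30,
                       id=JOB_ID)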
Exemple #37
0
def schedules(org_label, project_label, schedule_label):
    ''' schedule configuration management
    /organizations/aquaya/projects/water-quality/schedules
        : viewing a list of this project's schedules
    /organizations/aquaya/projects/water-quality/schedules?create=true
        : create a new schedule config, immediately redirect to editing
    /organizations/aquaya/projects/water-quality/schedules/weekly
        : view the 'weekly' schedule config
    /organizations/aquaya/projects/water-quality/schedules/weekly?edit=true
        : edit a schedule config; accepts GET or POST
    '''
    user = User.objects(email=session['email'])[0]
    
    orgs = Organization.objects(label=org_label)
    if not orgs:
        flash('Organization "%s" not found, sorry!' % org_label, 'warning')
        return redirect(url_for('organizations'))
    org = orgs[0]

    # permission-check
    if org not in user.organizations and not user.admin_rights:
        app.logger.error('%s tried to view a project but was \
            denied for want of admin rights' % session['email'])
        abort(404)
    
    # find the project
    projects = Project.objects(label=project_label, organization=org) 
    if not projects:
        flash('Project "%s" not found, sorry!' % project_label, 'warning')
        return redirect(url_for('organizations', org_label=org.label))
    project = projects[0]

    if request.method == 'POST':
        # we have a schedule_label
        schedules = Schedule.objects(label=schedule_label, project=project)
        if not schedules:
            abort(404)
        schedule = schedules[0]

        form_type = request.form.get('form_type', '')
        if form_type == 'info':
            if schedule.name != request.form.get('name', ''):
                name = request.form.get('name', '')
                schedule.name = name

                schedules = Schedule.objects(project=project).only('label')
                labels = [s.label for s in schedules]
                schedule.label = utilities.generate_label(name, labels)

            schedule.description = request.form.get('description', '')
       
        elif form_type == 'items':
            # modify schedule attachments

            # 'items' is of the form report_id__4abcd00123 or statistic_id..
            # convert these to real objects
            items = request.form.getlist('items')
            attached_reports, attached_statistics = [], []
            for item in items:
                prefix, item_id = item.split('__')
                item_type = prefix.split('_')[0]

                if item_type == 'report':
                    reports = Report.objects(id=item_id)
                    if not reports:
                        abort(404)
                    attached_reports.append(reports[0])
                
                elif item_type == 'statistic':
                    statistics = Statistic.objects(id=item_id)
                    if not statistics:
                        abort(404)
                    attached_statistics.append(statistics[0])
    
            schedule.update(set__reports=attached_reports)
            schedule.update(set__statistics=attached_statistics)

            # whether we're also sending project data
            send_data = request.form.get('send_project_data', '')
            if send_data == 'true':
                schedule.update(set__send_project_data = True)
            else:
                schedule.update(set__send_project_data = False)

            # save the list of project-data-filters
            # filters are formatted like 'filter_id__4abcd123'
            filter_ids = request.form.getlist('filters')
            attached_filters = []
            for filter_id in filter_ids:
                filters = Filter.objects(id=filter_id.split('__')[1])
                if not filters:
                    abort(404)
                attached_filters.append(filters[0])

            schedule.update(set__data_filters = attached_filters)
            
            # how to apply the filters
            apply_any_filters = request.form.get('apply_any_filters', '')
            if apply_any_filters == 'true':
                schedule.update(set__apply_any_filters= True)
            else:
                schedule.update(set__apply_any_filters= False)

            flash('Schedule attachments saved.', 'success')
            url = url_for('schedules', org_label=org.label
                , project_label=project.label
                , schedule_label=schedule.label, edit='true')
            return redirect(url + '#items')

        elif form_type == 'schedule':
            old_interval = schedule.interval
            new_interval = {
                'type': request.form.get('schedule_type', '')
                , 'at': request.form.get('send_at', '')
                , 'on_day': request.form.get('send_on_day', '')}

            if old_interval != new_interval:
                schedule.interval = new_interval
                schedule.save()
                schedule.reload()
                # delete any old jobs and schedule the new ones
                update_scheduled_send(schedule.id)
            
            flash('Scheduling interval saved successfully.', 'success')
            url = url_for('schedules', org_label=org.label
                , project_label=project.label
                , schedule_label=schedule.label, edit='true')
            return redirect(url + '#schedule')
            
        
        elif form_type == 'message':
            message_type = request.form.get('message_type', '')
            schedule.message_type = message_type

            if message_type == 'email':
                # attach email info to schedule
                schedule.email_subject = request.form.get('email_subject', '')
                schedule.email_body = request.form.get('email_body', '')

                add_email = request.form.get('add_email_recipient_email', '')
                if add_email:
                    # is the email already in place? (shouldn't be)
                    for recipient in schedule.email_recipients:
                        if recipient['email'] == add_email:
                            flash('"%s" is already included in this \
                                schedule' % add_email, 'warning')
                            url = url_for('schedules', org_label=org.label
                                , project_label=project.label
                                , schedule_label=schedule.label, edit='true')
                            return redirect(url + '#message')
                    
                    else:
                        # add the recipient to the schedule
                        add_name = request.form.get(
                            'add_email_recipient_name', '')
                        recipient = {'email': add_email, 'name': add_name}

                        schedule.update(push__email_recipients=recipient)
                        flash('Successfully added "%s" to this schedule' \
                            % add_email, 'success')

                
                remove_email = request.form.get('remove_email_recipient', '')
                if remove_email:
                    # is the email already attached? (should be)
                    emails = [r['email'] for r in schedule.email_recipients]
                    if remove_email not in emails:
                        flash('"%s" is not included in this schedule and \
                            cannot be removed' % remove_email, 'warning')
                        url = url_for('schedules', org_label=org.label
                            , project_label=project.label
                            , schedule_label=schedule.label, edit='true')
                        return redirect(url + '#message')

                    else:
                        # remove it
                        for recipient in schedule.email_recipients:
                            if recipient['email'] == remove_email:
                                schedule.update(
                                    pull__email_recipients=recipient)
                                flash('Successfully removed the address "%s" \
                                    from this schedule' % remove_email
                                    , 'success')
            
            elif message_type == 'sms':
                # attach sms info to schedule
                add_number = request.form.get(
                    'add_sms_recipient_phone_number', '')
                if add_number:
                    # is the number already in place? (shouldn't be)
                    for recipient in schedule.sms_recipients:
                        if recipient['phone_number'] == add_number:
                            flash('"%s" is already included in this \
                                schedule' % add_number, 'warning')
                            url = url_for('schedules', org_label=org.label
                                , project_label=project.label
                                , schedule_label=schedule.label, edit='true')
                            return redirect(url + '#message')
                    
                    else:
                        # add the recipient to the schedule
                        add_name = request.form.get(
                            'add_sms_recipient_name', '')
                        recipient = {'phone_number': add_number
                            , 'name': add_name}

                        schedule.update(push__sms_recipients=recipient)
                        flash('Successfully added "%s" to this schedule' \
                            % add_number, 'success')

                
                remove_number = request.form.get('remove_sms_recipient', '')
                if remove_number:
                    # is the number already attached? (should be)
                    numbers = [r['phone_number'] for r in \
                        schedule.sms_recipients]
                    if remove_number not in numbers:
                        flash('"%s" is not included in this SMS schedule and \
                            cannot be removed' % remove_number, 'warning')
                        url = url_for('schedules', org_label=org.label
                            , project_label=project.label
                            , schedule_label=schedule.label, edit='true')
                        return redirect(url + '#message')

                    else:
                        # remove it
                        for recipient in schedule.sms_recipients:
                            if recipient['phone_number'] == remove_number:
                                schedule.update(
                                    pull__sms_recipients=recipient)
                                flash('Successfully removed the number "%s" \
                                    from this schedule' % remove_number
                                    , 'success')
                                break
            
            schedule.save()

            flash('Message settings changed successfully.', 'success')

            url = url_for('schedules', org_label=org.label
                , project_label=project.label, schedule_label=schedule.label
                , edit='true')
            return redirect(url + '#message')
       

        elif form_type == 'admin':
            # delete the schedule
            # keeping the associated messages as they're an important log
            # related items in the queue will not fire sans schedule 
            name = schedule.name

            # cancel any outstanding jobs
            if schedule.next_task_id:
                redis_config = app.config['REDIS_CONFIG']
                use_connection(Redis(redis_config['host'], redis_config['port']
                        , password=redis_config['password']))
                scheduler = Scheduler()
                job = Job(id=schedule.next_task_id)
                scheduler.cancel(job)

            # delete the job itself
            schedule.delete()
            app.logger.info('%s deleted schedule "%s"' % \
                (session['email'], name))
            flash('schedule "%s" was deleted successfully' % name, 'success')
            return redirect(url_for('schedules', org_label=org.label
                , project_label=project.label))
        
        else:
            # bad 'form_type'
            abort(404)
       
        try:
            schedule.save()
            flash('Changes saved successfully.', 'success')
            return redirect(url_for('schedules', org_label=org.label
                , project_label=project.label
                , schedule_label=schedule.label, edit='true'))
        except Exception:
            app.logger.error('%s experienced an error saving info about %s' % (
                session['email'], request.form['name']))
            flash('Error saving changes - make sure schedule names are \
                unique.', 'error')
            return redirect(url_for('schedules', org_label=org.label
                , project_label=project.label, schedule_label=schedule_label
                , edit='true'))
        
    
    if request.method == 'GET':
        if schedule_label:
            schedules = Schedule.objects(label=schedule_label, project=project)
            if not schedules:
                app.logger.error('%s tried to access a schedule that does \
                    not exist' % session['email'])
                flash('schedule "%s" not found, sorry!' % schedule_label
                    , 'warning')
                return redirect(url_for('schedules', org_label=org.label
                    , project_label=project.label))
            schedule = schedules[0]

            if request.args.get('edit', '') == 'true':
                
                available_reports = Report.objects(project=project)
                available_filters = Filter.objects(project=project)
                available_statistics = Statistic.objects(project=project)
                
                emails = json.dumps(schedule.email_recipients)

                current_time = datetime.datetime.utcnow()
                
                return render_template('schedule_edit.html', schedule=schedule
                        , available_reports=available_reports
                        , available_filters=available_filters
                        , available_statistics=available_statistics
                        , email_recipients=emails
                        , current_time = current_time)

            elif request.args.get('fire', '') == 'true':
                if ready_to_send(schedule):
                    # immediately enqueue the job with rq
                    # countdown briefly to ensure next_task_id can be updated?
                    redis_config = app.config['REDIS_CONFIG']
                    use_connection(Redis(redis_config['host']
                            , redis_config['port']
                            , password=redis_config['password']))
                    queue = Queue()
                    job = queue.enqueue(send_scheduled_report, schedule.id)
                    
                    # save the id
                    # race condition with enqueued func above?
                    # that's why we used to have a countdown
                    schedule.update(set__next_task_id = job.id)

                    flash('Sending message, this may take a few \
                        moments.  Check the message log for updates.', 'info')
                else:
                    flash('This schedule is not properly configured.', 'error')

                return redirect(url_for('schedules', org_label=org.label
                    , project_label=project.label
                    , schedule_label=schedule_label))

            else:
                # get all messages attached to this schedule
                messages = Message.objects(schedule=schedule).order_by(
                    '-creation_time')

                return render_template('schedule.html', schedule=schedule
                    , messages=messages)

        if request.args.get('create', '') == 'true':
            # create a new schedule

            # CSRF validation
            token = request.args.get('token', '')
            if not verify_token(token):
                abort(403)

            try:
                schedule_name = 'schd-%s' \
                    % utilities.generate_random_string(6)
                new_schedule = Schedule(
                    label = schedule_name.lower()
                    , project = project
                    , name = schedule_name
                )
                new_schedule.save() 
                app.logger.info('schedule created by %s' % session['email'])
                flash('schedule created.  Please change the default values.'
                    , 'success')
            except Exception:
                app.logger.error('schedule creation failed for %s' % \
                    session['email'])
                flash('There was an error, sorry :/', 'error')
                return redirect(url_for('projects', org_label=org.label
                    , project=project.label))
            
            # redirect to the editing screen
            return redirect(url_for('schedules', org_label=org.label
                , project_label=project.label
                , schedule_label=new_schedule.label, edit='true'))
        
        # no statistic in particular was specified; show em all
        schedules = Schedule.objects(project=project)

        return render_template('project_schedules.html', project=project
            , schedules=schedules)
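The 'schedule' branch above delegates to update_scheduled_send(schedule.id), which the example does not show. The following is only a sketch of what such a helper could do under the same Redis/rq-scheduler conventions and module-level imports used in the view (Schedule, app, use_connection, Redis, Scheduler, Job, send_scheduled_report); the _interval_to_seconds helper is hypothetical:

def update_scheduled_send(schedule_id):
    ''' sketch only: replace the pending job for this schedule
    cancel whatever was recorded in next_task_id, then enqueue the next send
    '''
    schedule = Schedule.objects(id=schedule_id)[0]

    redis_config = app.config['REDIS_CONFIG']
    use_connection(Redis(redis_config['host'], redis_config['port']
            , password=redis_config['password']))
    scheduler = Scheduler()

    # cancel the previously enqueued job, if any
    if schedule.next_task_id:
        scheduler.cancel(Job(id=schedule.next_task_id))

    # nothing to do if the interval has not been configured yet
    if not schedule.interval:
        return False

    # _interval_to_seconds is hypothetical: it turns the interval dict
    # ({'type': ..., 'at': ..., 'on_day': ...}) into a delay in seconds
    delay = _interval_to_seconds(schedule.interval)
    job = scheduler.enqueue_in(datetime.timedelta(seconds=int(delay))
        , send_scheduled_report, schedule.id)

    # remember the job so it can be cancelled or replaced later
    schedule.update(set__next_task_id = job.id)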
Exemple #38
0
class TestScheduler(RQTestCase):

    def setUp(self):
        super(TestScheduler, self).setUp()
        self.scheduler = Scheduler(connection=self.testconn)

    def test_birth_and_death_registration(self):
        """
        When the scheduler registers its birth, besides creating a key, it should
        also set an expiry that's a few seconds longer than its polling
        interval so the key automatically expires if the scheduler is unexpectedly
        terminated.
        """
        key = Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=20)
        scheduler.register_birth()
        self.assertIn(key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(key), 30)
        self.assertFalse(self.testconn.hexists(key, 'death'))
        self.assertRaises(ValueError, scheduler.register_birth)
        scheduler.register_death()
        self.assertTrue(self.testconn.hexists(key, 'death'))

    def test_create_job(self):
        """
        Ensure that jobs are created properly.
        """
        job = self.scheduler._create_job(say_hello, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job, job_from_queue)
        self.assertEqual(job_from_queue.func, say_hello)

    def test_create_job_with_ttl(self):
        """
        Ensure that TTL is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, ttl=2, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(2, job_from_queue.ttl)

    def test_create_job_with_id(self):
        """
        Ensure that ID is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, id='id test', args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('id test', job_from_queue.id)

    def test_create_job_with_description(self):
        """
        Ensure that description is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, description='description', args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('description', job_from_queue.description)

    def test_job_not_persisted_if_commit_false(self):
        """
        Ensure jobs are only saved to Redis if commit=True.
        """
        job = self.scheduler._create_job(say_hello, commit=False)
        self.assertEqual(self.testconn.hgetall(job.key), {})

    def test_create_scheduled_job(self):
        """
        Ensure that scheduled jobs are put in the scheduler queue with the right score
        """
        scheduled_time = datetime.utcnow()
        job = self.scheduler.enqueue_at(scheduled_time, say_hello)
        self.assertEqual(job, Job.fetch(job.id, connection=self.testconn))
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(scheduled_time))

    def test_enqueue_in(self):
        """
        Ensure that jobs have the right scheduled time.
        """
        right_now = datetime.utcnow()
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertIn(job.id,
                      tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))
        time_delta = timedelta(hours=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))

    def test_get_jobs(self):
        """
        Ensure get_jobs() returns all jobs scheduled up to the specified time.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs(now))
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertIn(job, self.scheduler.get_jobs(timedelta(hours=1, seconds=1)))
        self.assertIn(job, [j[0] for j in self.scheduler.get_jobs(with_times=True)])
        self.assertIsInstance(self.scheduler.get_jobs(with_times=True)[0][1], datetime)
        self.assertNotIn(job, self.scheduler.get_jobs(timedelta(minutes=59, seconds=59)))

    def test_get_jobs_to_queue(self):
        """
        Ensure that jobs scheduled in the future are not queued.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertNotIn(job, self.scheduler.get_jobs_to_queue())

    def test_enqueue_job(self):
        """
        When a scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        - Queue is recognized by rq's Queue.all()
        """
        now = datetime.utcnow()
        queue_name = 'foo'
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)

        job = scheduler.enqueue_at(now, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job, tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10)))
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)
        self.assertIn(queue, Queue.all())

    def test_job_membership(self):
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler)
        self.assertIn(job.id, self.scheduler)
        self.assertNotIn("non-existing-job-id", self.scheduler)

    def test_cancel_scheduled_job(self):
        """
        When a scheduled job is canceled, make sure:
        - Job is removed from the sorted set of scheduled jobs
        """
        # schedule a job to be enqueued one minute from now
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        # cancel the scheduled job and check that it's gone from the set
        self.scheduler.cancel(job)
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_change_execution_time(self):
        """
        Ensure ``change_execution_time`` updates the job's score in the scheduled jobs set.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
        new_date = datetime(2010, 1, 1)
        self.scheduler.change_execution_time(job, new_date)
        self.assertEqual(to_unix(new_date),
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))
        self.scheduler.cancel(job)
        self.assertRaises(ValueError, self.scheduler.change_execution_time, job, new_date)

    def test_args_kwargs_are_passed_correctly(self):
        """
        Ensure that arguments and keyword arguments are properly saved to jobs.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_in(time_delta, simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

    def test_enqueue_is_deprecated(self):
        """
        Ensure .enqueue() throws a DeprecationWarning
        """
        with warnings.catch_warnings(record=True) as w:
            # Enable all warnings
            warnings.simplefilter("always")
            job = self.scheduler.enqueue(datetime.utcnow(), say_hello)
            self.assertEqual(1, len(w))
            self.assertEqual(w[0].category, DeprecationWarning)

    def test_enqueue_periodic(self):
        """
        Ensure .enqueue_periodic() throws a DeprecationWarning
        """
        with warnings.catch_warnings(record=True) as w:
            # Enable all warnings
            warnings.simplefilter("always")
            job = self.scheduler.enqueue_periodic(datetime.utcnow(), 1, None, say_hello)
            self.assertEqual(1, len(w))
            self.assertEqual(w[0].category, DeprecationWarning)

    def test_interval_and_repeat_persisted_correctly(self):
        """
        Ensure that interval and repeat attributes get correctly saved in Redis.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=10, repeat=11)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['interval'], 10)
        self.assertEqual(job_from_queue.meta['repeat'], 11)

    def test_crontab_persisted_correctly(self):
        """
        Ensure that crontab attribute gets correctly saved in Redis.
        """
        # create a job that runs one minute past each whole hour
        job = self.scheduler.cron("1 * * * *", say_hello)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['cron_string'], "1 * * * *")

        # get the scheduled_time and convert it to a datetime object
        unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)
        datetime_time = from_unix(unix_time)

        # check that minute=1, seconds=0, and is within an hour
        assert datetime_time.minute == 1
        assert datetime_time.second == 0
        assert datetime_time - datetime.utcnow() < timedelta(hours=1)

    def test_crontab_sets_timeout(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom timeout.
        """
        timeout = 13
        job = self.scheduler.cron("1 * * * *", say_hello, timeout=timeout)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.timeout, timeout)

    def test_crontab_sets_id(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom id
        """
        job_id = "hello-job-id"
        job = self.scheduler.cron("1 * * * *", say_hello, id=job_id)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_id, job_from_queue.id)

    def test_crontab_sets_default_result_ttl(self):
        """
        Ensure that a job scheduled via crontab gets the proper default
        result_ttl (-1) for periodic tasks.
        """
        job = self.scheduler.cron("1 * * * *", say_hello)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(-1, job_from_queue.result_ttl)

    def test_crontab_sets_description(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom description
        """
        description = 'test description'
        job = self.scheduler.cron("1 * * * *", say_hello, description=description)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(description, job_from_queue.description)

    def test_repeat_without_interval_raises_error(self):
        # Ensure that an error is raised if repeat is specified without interval
        def create_job():
            self.scheduler.schedule(datetime.utcnow(), say_hello, repeat=11)
        self.assertRaises(ValueError, create_job)

    def test_job_with_intervals_get_rescheduled(self):
        """
        Ensure jobs with an interval attribute are put back in the scheduler.
        """
        time_now = datetime.utcnow()
        interval = 10
        job = self.scheduler.schedule(time_now, say_hello, interval=interval)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(time_now) + interval)

        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, None, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(time_now) + interval)

    def test_job_with_crontab_get_rescheduled(self):
        # Create a job with a cronjob_string
        job = self.scheduler.cron("1 * * * *", say_hello)

        # current unix_time
        old_next_scheduled_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)

        # change crontab
        job.meta['cron_string'] = "2 * * * *"

        # enqueue the job
        self.scheduler.enqueue_job(job)

        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # check that next scheduled time has changed
        self.assertNotEqual(old_next_scheduled_time,
                            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))

        # check that new next scheduled time is set correctly
        expected_next_scheduled_time = to_unix(get_next_scheduled_time("2 * * * *"))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         expected_next_scheduled_time)

    def test_job_with_repeat(self):
        """
        Ensure jobs with a repeat attribute are put back in the scheduler
        the specified (repeat) number of times.
        """
        time_now = datetime.utcnow()
        interval = 10
        # If job is repeated once, the job shouldn't be put back in the queue
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=1)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=2)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        time_now = datetime.utcnow()
        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, 1, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.enqueue_periodic(time_now, interval, 2, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_missing_jobs_removed_from_scheduler(self):
        """
        Ensure jobs that no longer exist when they are due to be queued are removed from the scheduler.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello)
        job.cancel()
        self.scheduler.get_jobs_to_queue()
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_periodic_jobs_sets_result_ttl(self):
        """
        Ensure periodic jobs set result_ttl to infinite.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.result_ttl, -1)

    def test_periodic_jobs_sets_ttl(self):
        """
        Ensure periodic jobs set ttl correctly.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, ttl=4)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.ttl, 4)

    def test_periodic_job_sets_id(self):
        """
        Ensure that ID is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, id='id test')
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('id test', job.id)

    def test_periodic_job_sets_description(self):
        """
        Ensure that description is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, description='description')
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('description', job.description)

    def test_run(self):
        """
        Check correct signal handling in Scheduler.run().
        """
        def send_stop_signal():
            """
            Sleep for 1 second, then send an INT signal to ourselves so that
            the signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)
        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, self.scheduler.run)
        thread.join()

    def test_scheduler_w_o_explicit_connection(self):
        """
        Ensure instantiating Scheduler w/o explicit connection works.
        """
        s = Scheduler()
        self.assertEqual(s.connection, self.testconn)

    def test_small_float_interval(self):
        """
        Test that the scheduler accepts a float 'interval' smaller than 1 second.
        """
        key = Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=0.1)   # testing interval = 0.1 second
        self.assertEqual(scheduler._interval, 0.1)

        #register birth
        scheduler.register_birth()
        self.assertIn(key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(key), 10)  # int(0.1) + 10 = 10
        self.assertFalse(self.testconn.hexists(key, 'death'))

        #enqueue a job
        now = datetime.utcnow()
        job = scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        self.assertEqual(len(self.scheduler.get_jobs()), 1)

        #register death
        scheduler.register_death()

        #test that run works with the small floating-point interval
        def send_stop_signal():
            """
            Sleep for 1 second, then send an INT signal to ourselves so that
            the signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)
        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, scheduler.run)
        thread.join()

        #all jobs must have been scheduled during 1 second
        self.assertEqual(len(scheduler.get_jobs()), 0)
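Taken together, the tests above document the public Scheduler API; a short standalone sketch of that surface, assuming only a local Redis on localhost:6379 and a hypothetical tasks.say_hello function importable by workers:

from datetime import datetime, timedelta

from redis import Redis
from rq_scheduler import Scheduler

from tasks import say_hello  # hypothetical task module

scheduler = Scheduler(connection=Redis(), queue_name='default')

# one-off job, five minutes from now
job = scheduler.enqueue_in(timedelta(minutes=5), say_hello)

# push it back an hour, then drop it entirely
scheduler.change_execution_time(job, datetime.utcnow() + timedelta(hours=1))
scheduler.cancel(job)

# periodic job: every 10 seconds, 11 repetitions, results kept forever (result_ttl=-1)
scheduler.schedule(datetime.utcnow(), say_hello, interval=10, repeat=11)

# cron-style job: one minute past every hour
scheduler.cron('1 * * * *', say_hello)

# move everything that is currently due onto its queue, then exit
scheduler.run(burst=True)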
class TestScheduler(RQTestCase):

    def setUp(self):
        super(TestScheduler, self).setUp()
        self.scheduler = Scheduler(connection=self.testconn)

    def test_birth_and_death_registration(self):
        key = Scheduler.scheduler_key
        self.assertNotIn(key, self.testconn.keys('*'))
        scheduler = Scheduler(connection=self.testconn)
        scheduler.register_birth()
        self.assertIn(key, self.testconn.keys('*'))
        self.assertFalse(self.testconn.hexists(key, 'death'))
        self.assertRaises(ValueError, scheduler.register_birth)
        scheduler.register_death()
        self.assertTrue(self.testconn.hexists(key, 'death'))

    def test_create_job(self):
        """
        Ensure that jobs are created properly.
        """
        job = self.scheduler._create_job(say_hello, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job, job_from_queue)
        self.assertEqual(job_from_queue.func, say_hello)

    def test_job_not_persisted_if_commit_false(self):
        """
        Ensure jobs are only saved to Redis if commit=True.
        """
        job = self.scheduler._create_job(say_hello, commit=False)
        self.assertEqual(self.testconn.hgetall(job.key), {})

    def test_create_scheduled_job(self):
        """
        Ensure that scheduled jobs are put in the scheduler queue with the right score
        """
        scheduled_time = datetime.now()
        job = self.scheduler.enqueue_at(scheduled_time, say_hello)
        self.assertEqual(job, Job.fetch(job.id, connection=self.testconn))
        self.assertIn(job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         int(scheduled_time.strftime('%s')))

    def test_enqueue_in(self):
        """
        Ensure that jobs have the right scheduled time.
        """
        right_now = datetime.now()
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertIn(job.id, self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         int((right_now + time_delta).strftime('%s')))
        time_delta = timedelta(hours=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         int((right_now + time_delta).strftime('%s')))

    def test_get_jobs_to_queue(self):
        """
        Ensure that jobs scheduled in the future are not queued.
        """
        now = datetime.now()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertNotIn(job, self.scheduler.get_jobs_to_queue())

    def test_enqueue_job(self):
        """
        When a scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        """
        now = datetime.now()
        queue_name = 'foo'
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)

        job = scheduler.enqueue_at(now, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job, self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10))
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)

    def test_cancel_scheduled_job(self):
        """
        When a scheduled job is canceled, make sure:
        - Job is removed from the sorted set of scheduled jobs
        """
        # schedule a job to be enqueued one minute from now
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        # cancel the scheduled job and check that it's gone from the set
        self.scheduler.cancel(job)
        self.assertNotIn(job.id, self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1))

    def test_change_execution_time(self):
        """
        Ensure ``change_execution_time`` updates the job's score in the scheduled jobs set.
        """
        job = self.scheduler.enqueue_at(datetime.now(), say_hello)
        new_date = datetime(2010, 1, 1)
        self.scheduler.change_execution_time(job, new_date)
        self.assertEqual(int(new_date.strftime('%s')),
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))

    def test_args_kwargs_are_passed_correctly(self):
        """
        Ensure that arguments and keyword arguments are properly saved to jobs.
        """
        job = self.scheduler.enqueue_at(datetime.now(), simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_at(datetime.now(), simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_at(datetime.now(), simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_in(time_delta, simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

    def test_interval_and_repeat_persisted_correctly(self):
        """
        Ensure that interval and repeat attributes get correctly saved in Redis.
        """
        job = self.scheduler.enqueue(datetime.now(), say_hello, interval=10, repeat=11)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(int(job_from_queue.interval), 10)
        self.assertEqual(int(job_from_queue.repeat), 11)

    def test_repeat_without_interval_raises_error(self):
        # Ensure that an error is raised if repeat is specified without interval
        def create_job():
            self.scheduler.enqueue(datetime.now(), say_hello, repeat=11)
        self.assertRaises(ValueError, create_job)

    def test_job_with_intervals_get_rescheduled(self):
        """
        Ensure jobs with an interval attribute are put back in the scheduler.
        """
        time_now = datetime.now()
        interval = 10
        job = self.scheduler.enqueue(time_now, say_hello, interval=interval)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         int(time_now.strftime('%s')) + interval)

        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, None, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         int(time_now.strftime('%s')) + interval)

    def test_job_with_repeat(self):
        """
        Ensure jobs with a repeat attribute are put back in the scheduler
        the specified (repeat) number of times.
        """
        time_now = datetime.now()
        interval = 10
        # If job is repeated once, the job shouldn't be put back in the queue
        job = self.scheduler.enqueue(time_now, say_hello, interval=interval, repeat=1)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.enqueue(time_now, say_hello, interval=interval, repeat=2)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))

        time_now = datetime.now()
        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, 1, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.enqueue_periodic(time_now, interval, 2, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))

    def test_missing_jobs_removed_from_scheduler(self):
        """
        Ensure jobs that no longer exist when they are due to be queued are removed from the scheduler.
        """
        job = self.scheduler.enqueue(datetime.now(), say_hello)
        job.cancel()
        self.scheduler.get_jobs_to_queue()
        self.assertNotIn(job.id, self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1))

    def test_periodic_jobs_sets_ttl(self):
        """
        Ensure periodic jobs set result_ttl to infinite.
        """
        job = self.scheduler.enqueue(datetime.now(), say_hello, interval=5)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.result_ttl, -1)
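This older example still schedules periodic work with .enqueue(scheduled_time, func, interval=..., repeat=...); as the deprecation tests earlier in this listing show, newer releases keep that call working but emit a DeprecationWarning, and .schedule() is the replacement. A small hedged sketch of the migration, with the connection details and task module assumed:

from datetime import datetime

from redis import Redis
from rq_scheduler import Scheduler

from tasks import say_hello  # hypothetical task module

scheduler = Scheduler(connection=Redis())

# old form (deprecated on newer rq-scheduler releases):
# scheduler.enqueue(datetime.utcnow(), say_hello, interval=10, repeat=11)

# current form:
scheduler.schedule(datetime.utcnow(), say_hello, interval=10, repeat=11)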