Ejemplo n.º 1
0
class TestSetupScheduledJobs(object):
    """Tests for the setup helper 'schedule_job'."""

    def setUp(self):
        # Connect either directly (host/port configured) or via a
        # Sentinel master lookup, then start from an empty store.
        redis_db = getattr(settings_test, 'REDIS_DB', 0)
        redis_pwd = getattr(settings_test, 'REDIS_PWD', None)
        master_name = getattr(settings_test, 'REDIS_MASTER_NAME', 'mymaster')
        direct = all(hasattr(settings_test, attr)
                     for attr in ['REDIS_MASTER_DNS', 'REDIS_PORT'])
        if direct:
            self.connection = StrictRedis(
                host=settings_test.REDIS_MASTER_DNS,
                port=settings_test.REDIS_PORT, db=redis_db, password=redis_pwd)
        else:
            sentinel = Sentinel(settings_test.REDIS_SENTINEL)
            self.connection = sentinel.master_for(master_name, db=redis_db,
                                                  password=redis_pwd)
        self.connection.flushall()
        self.scheduler = Scheduler('test_queue', connection=self.connection)

    def test_adds_scheduled_job_with_interval(self):
        a_job['interval'] = 7
        schedule_job(a_job, self.scheduler)
        scheduled = list(self.scheduler.get_jobs())

        count = len(scheduled)
        assert count == 1, scheduled
        first = scheduled[0]
        assert first.meta['interval'] == 7, first.meta
        a_job['interval'] = 1  # restore the shared fixture

    def test_adds_several_jobs_(self):
        schedule_job(a_job, self.scheduler)
        schedule_job(another_job, self.scheduler)
        scheduled = self.scheduler.get_jobs()
        func_names = [job.func_name for job in scheduled]
        prefix = 'test_jobs.test_schedule_jobs'

        all_jobs = list(self.scheduler.get_jobs())
        assert len(all_jobs) == 2, len(all_jobs)
        assert prefix + '.a_function' in func_names, func_names
        assert prefix + '.another_function' in func_names, func_names

    def test_does_not_add_job_if_already_added(self):
        schedule_job(a_job, self.scheduler)
        schedule_job(a_job, self.scheduler)
        scheduled = list(self.scheduler.get_jobs())

        assert len(scheduled) == 1, scheduled

    def test_returns_log_messages(self):
        success_message = schedule_job(a_job, self.scheduler)
        expected = 'Scheduled a_function([], {}) to run every 1 seconds'
        assert success_message == expected, (success_message, expected)

        failure_message = schedule_job(a_job, self.scheduler)
        assert failure_message == 'WARNING: Job a_function([], {}) is already scheduled'

    def test_failed_attempt_to_schedule_does_not_polute_redis(self):
        schedule_job(a_job, self.scheduler)
        schedule_job(a_job, self.scheduler)
        stored = self.connection.keys('rq:job*')

        assert len(stored) == 1, len(stored)
def main():
    """CLI for inspecting and managing rq-scheduler jobs.

    Actions:
        show   -- print jobs scheduled within the next 24 hours
        remove -- cancel the job whose id matches the 'object' argument
        purge  -- cancel every scheduled job
    """
    parser = argparse.ArgumentParser()

    parser.add_argument("action",
                        choices=['show', 'remove', 'purge'],
                        help="Which action to run.")

    parser.add_argument(
        "object",
        help="Which object to remove (if removing). "
             "Add 'all' where this argument is unused.")

    parser.add_argument(
        "-e", '--env',
        choices=['development', 'test', 'production'],
        help="Run within a specific environment. Otherwise run under the "
             "environment defined in the environment variable CS_ENV")

    args = parser.parse_args()
    if args.env is not None:  # 'is not None', not '!= None'
        os.environ['CS_ENV'] = args.env

    # The queue name doubles as the environment name.
    env = os.environ['CS_ENV']
    scheduler = Scheduler(queue_name=env, connection=Redis())

    if args.action == "show":
        print("\n")
        print("=================================")
        print("  Job Schedule For {0}".format(env))
        print("=================================")
        print("\n")
        for job, run_time in scheduler.get_jobs(until=timedelta(hours=24),
                                                with_times=True):
            print("ID: {1}\n    Job: {0}\n    Time: {2}\n".format(
                job.description, job.id, run_time))
    elif args.action == "remove":
        if args.object is None:
            print("Please specify the job to remove")
        else:
            for job in scheduler.get_jobs():
                if args.object == job.id:
                    scheduler.cancel(job.id)
                    print("Job {0} cancelled from {1}".format(
                        args.object, env))
    elif args.action == "purge":
        count = 0
        for job in scheduler.get_jobs():
            count += 1
            scheduler.cancel(job.id)
        print("Purged {0} jobs from {1}".format(count, env))
def test_schedule_jobs(mocker, queue, jobid):
    """schedule_jobs adds the job once; a second run changes nothing."""
    scheduler = Scheduler(queue=queue, connection=queue.connection)
    scheduler.cancel = mocker.MagicMock()
    # First run schedules the job; the second must be a no-op.
    for _ in range(2):
        jobs.schedule_jobs(scheduler)
        assert jobid in scheduler
        assert len(list(scheduler.get_jobs())) == 1
    assert not scheduler.cancel.called
Ejemplo n.º 4
0
class TestSetupScheduledJobs(object):
    """Tests for the setup helper '_schedule_job'."""

    def setUp(self):
        self.connection = StrictRedis()
        self.connection.flushall()  # each test starts from an empty store
        self.scheduler = Scheduler('test_queue', connection=self.connection)


    def test_adds_scheduled_job_with_interval(self):
        a_job['interval'] = 7
        _schedule_job(a_job, self.scheduler)
        scheduled = self.scheduler.get_jobs()

        assert len(scheduled) == 1, scheduled
        assert scheduled[0].meta['interval'] == 7, scheduled[0].meta
        a_job['interval'] = 1  # restore the shared fixture


    def test_adds_several_jobs_(self):
        _schedule_job(a_job, self.scheduler)
        _schedule_job(another_job, self.scheduler)
        scheduled = self.scheduler.get_jobs()
        func_names = [job.func_name for job in scheduled]
        prefix = 'test_jobs.test_schedule_jobs'

        assert len(scheduled) == 2, scheduled
        assert prefix + '.a_function' in func_names, func_names
        assert prefix + '.another_function' in func_names, func_names


    def test_does_not_add_job_if_already_added(self):
        _schedule_job(a_job, self.scheduler)
        _schedule_job(a_job, self.scheduler)
        scheduled = self.scheduler.get_jobs()

        assert len(scheduled) == 1, scheduled


    def test_returns_log_messages(self):
        first = _schedule_job(a_job, self.scheduler)
        second = _schedule_job(a_job, self.scheduler)

        assert first == 'Scheduled a_function([], {}) to run every 1 seconds'
        assert second == 'WARNING: Job a_function([], {}) is already scheduled'


    def test_failed_attempt_to_schedule_does_not_polute_redis(self):
        _schedule_job(a_job, self.scheduler)
        _schedule_job(a_job, self.scheduler)
        stored = self.connection.keys('rq:job*')

        assert len(stored) == 1, len(stored)
def test_schedule_jobs_modified(mocker, queue, sql_job):
    """A changed modified_at in SQL propagates to the job's meta."""
    mocker.patch('sfa_api.jobs.storage._call_procedure',
                 return_value=[sql_job])
    scheduler = Scheduler(queue=queue, connection=queue.connection)
    jobs.schedule_jobs(scheduler)
    stamp = list(scheduler.get_jobs())[0].meta['last_modified_in_sql']
    assert stamp == dt.datetime(2019, 1, 1, 12, tzinfo=dt.timezone.utc)

    updated = sql_job.copy()
    updated['modified_at'] = dt.datetime(2019, 2, 1, tzinfo=dt.timezone.utc)
    mocker.patch('sfa_api.jobs.storage._call_procedure',
                 return_value=[updated])
    jobs.schedule_jobs(scheduler)
    stamp = list(scheduler.get_jobs())[0].meta['last_modified_in_sql']
    assert stamp == dt.datetime(2019, 2, 1, tzinfo=dt.timezone.utc)
Ejemplo n.º 6
0
class TestSetupScheduledJobs(object):
    """Tests for the setup helper 'schedule_job'."""
    def setUp(self):
        # Connect through Sentinel and start every test from an empty store.
        sentinel = Sentinel(settings_test.REDIS_SENTINEL)
        redis_db = getattr(settings_test, 'REDIS_DB', 0)
        self.connection = sentinel.master_for('mymaster', db=redis_db)
        self.connection.flushall()
        self.scheduler = Scheduler('test_queue', connection=self.connection)

    def test_adds_scheduled_job_with_interval(self):
        a_job['interval'] = 7
        schedule_job(a_job, self.scheduler)
        scheduled = self.scheduler.get_jobs()

        assert len(scheduled) == 1, scheduled
        assert scheduled[0].meta['interval'] == 7, scheduled[0].meta
        a_job['interval'] = 1  # restore the shared fixture

    def test_adds_several_jobs_(self):
        schedule_job(a_job, self.scheduler)
        schedule_job(another_job, self.scheduler)
        scheduled = self.scheduler.get_jobs()
        func_names = [job.func_name for job in scheduled]
        prefix = 'test_jobs.test_schedule_jobs'

        assert len(scheduled) == 2, scheduled
        assert prefix + '.a_function' in func_names, func_names
        assert prefix + '.another_function' in func_names, func_names

    def test_does_not_add_job_if_already_added(self):
        schedule_job(a_job, self.scheduler)
        schedule_job(a_job, self.scheduler)
        scheduled = self.scheduler.get_jobs()

        assert len(scheduled) == 1, scheduled

    def test_returns_log_messages(self):
        first = schedule_job(a_job, self.scheduler)
        second = schedule_job(a_job, self.scheduler)

        assert first == 'Scheduled a_function([], {}) to run every 1 seconds'
        assert second == 'WARNING: Job a_function([], {}) is already scheduled'

    def test_failed_attempt_to_schedule_does_not_polute_redis(self):
        schedule_job(a_job, self.scheduler)
        schedule_job(a_job, self.scheduler)
        stored = self.connection.keys('rq:job*')

        assert len(stored) == 1, len(stored)
Ejemplo n.º 7
0
def list_synchronization():
    """Print the id of every currently scheduled job."""
    scheduler = Scheduler(connection=StrictRedis())
    for scheduled_job in scheduler.get_jobs():
        print(scheduled_job.id)
Ejemplo n.º 8
0
    def get_context_data(self, **kwargs):
        """Add this queue's scheduled jobs to the template context.

        Raises Http404 when rq_scheduler is not installed.
        """
        ctx = super(SchedulerDetails, self).get_context_data(**kwargs)
        if Scheduler is None:
            # rq_scheduler is not installed
            raise Http404
        scheduler = Scheduler(self.connection)
        queue = Queue(self.kwargs['queue'], connection=self.connection)

        # Keep only the (job, next_run) pairs that belong to this queue.
        jobs = [(job, next_run)
                for job, next_run in scheduler.get_jobs(with_times=True)
                if job.origin == queue.name]

        ctx.update({
            'queue': queue,
            'jobs': [serialize_scheduled_job(job, next_run)
                     for job, next_run in jobs],
            'has_permission': True,
            'title': "Jobs scheduled on '%s' queue" % queue.name,
        })
        return ctx
Ejemplo n.º 9
0
Archivo: views.py Proyecto: vegten/OIPA
def get_scheduled_tasks(request):
    """Return every job on the 'default' scheduler queue as a JSON response."""
    from rq import use_connection
    from rq_scheduler import Scheduler
    import json

    use_connection()  # Use RQ's default Redis connection
    scheduler = Scheduler()  # Get a scheduler for the "default" queue

    jobdata = []
    for job in scheduler.get_jobs():
        # Jobs scheduled without a repeat carry no 'interval' meta entry.
        interval = job.meta.get("interval", 0)
        jobdata.append({
            'job_id': job._id,
            'task': job.description,
            'period': interval,
            'args': job.args,
            'queue': "default",
        })

    data = json.dumps(jobdata)
    return HttpResponse(data, content_type='application/json')
Ejemplo n.º 10
0
def get_scheduled_tasks(request):
    """Return the jobs scheduled on the 'default' queue as a JSON response."""
    # Get a scheduler for the "default" queue
    scheduler = Scheduler(connection=tasks.redis_conn)

    jobdata = []
    for job in scheduler.get_jobs():
        # Jobs scheduled without a repeat carry no 'interval' meta entry.
        interval = job.meta.get("interval", 0)

        jobdata.append({
            'job_id': job._id,
            'task': job.description,
            'period': interval,
            'args': job.args,
            'queue': "default"})

    data = json.dumps(jobdata)
    return HttpResponse(data, content_type='application/json')
Ejemplo n.º 11
0
    def handle(self, *args, **options):
        """Schedule the recurring RSS fetch, optionally replacing an old one."""
        with Connection(redis.Redis(**settings.RQ_DATABASE)):
            scheduler = Scheduler('rss_collector')

            for job in scheduler.get_jobs():
                if job.func_name != 'collector.rss.fetch_rss':
                    continue
                # An existing fetch job: replace it or bail out.
                if not options.get('replace'):
                    raise CommandError('RSS collector task already scheduled')
                job.cancel()
                break

            try:
                scheduler.schedule(
                    datetime.datetime.now(),
                    fetch_rss,
                    interval=1200,
                    repeat=20000,
                )
            except redis.exceptions.ConnectionError:
                raise CommandError('Redis did not respond')
Ejemplo n.º 12
0
def view_scheduled_jobs():
    """Print every job currently known to the scheduler."""
    with Connection(redis.from_url(app.config['REDIS_URL'])) as conn:
        q = Queue()  # NOTE(review): unused locally; kept as in the original
        scheduler = Scheduler(connection=conn)
        for scheduled_job in scheduler.get_jobs():
            print(scheduled_job)
Ejemplo n.º 13
0
def cancel_all(queue_name):
    """Cancel every scheduled job originating from *queue_name*."""
    scheduler = Scheduler()
    matching = (job for job in scheduler.get_jobs()
                if job.origin == queue_name)
    for job in matching:
        scheduler.cancel(job)

    return dict(status='OK')
Ejemplo n.º 14
0
def cancel_all(queue_name):
    """Cancel all scheduled jobs whose origin queue is *queue_name*."""
    scheduler = Scheduler()
    for job in scheduler.get_jobs():
        if job.origin != queue_name:
            continue
        scheduler.cancel(job)

    return {'status': 'OK'}
def test_schedule_jobs_bad_current(mocker, queue, jobid):
    """A stale cron entry with empty meta is replaced by the real job."""
    scheduler = Scheduler(queue=queue, connection=queue.connection)
    stale_id = 'jobid0'
    scheduler.cron('* * * * *', func=noop, id=stale_id, meta={})
    jobs.schedule_jobs(scheduler)
    assert jobid in scheduler
    assert stale_id not in scheduler
    assert len(list(scheduler.get_jobs())) == 1
Ejemplo n.º 16
0
def create_schedules():
    """Reset the schedule: drop every job, then re-add the branch refresh."""
    from towerdashboard.jobs import refresh_github_branches
    scheduler = Scheduler(connection=Redis('redis'))
    for existing in scheduler.get_jobs():
        scheduler.cancel(existing)

    scheduler.schedule(scheduled_time=datetime.utcnow(),
                       func=refresh_github_branches,
                       interval=120, repeat=None, result_ttl=120)
Ejemplo n.º 17
0
def list_todo():
    """Log and return a plain-text listing of all scheduled jobs."""
    # Get a scheduler for the "default" queue
    scheduler = Scheduler(connection=Redis())

    response.content_type = 'text/plain; charset=utf-8'

    msg = "\n".join([str(job) for job in scheduler.get_jobs()])
    logging.info(msg)

    return msg
Ejemplo n.º 18
0
def _get_scheduled_jobs(redis_conn):
    """Collect id/status pairs for every job on every configured queue."""
    job_list = []

    for queue_name in _get_queue_list():
        scheduler = Scheduler(queue_name=queue_name, connection=redis_conn)
        job_list.extend(
            {'id': job.get_id(), 'status': job.get_status()}
            for job in scheduler.get_jobs())

    return job_list
Ejemplo n.º 19
0
def register_scheduler():
    """Replace any existing 'lidarts-bulk' jobs with the last-seen updater."""
    scheduler = Scheduler('lidarts-bulk', connection=Redis())
    for stale_job in scheduler.get_jobs():
        scheduler.cancel(stale_job)
    scheduler.schedule(
        scheduled_time=datetime.utcnow(),
        func='lidarts.tasks.bulk_update_last_seen',
        interval=5,
        repeat=None,
        ttl=10,
    )
Ejemplo n.º 20
0
def scheduled_jobs():
    """Return all scheduled jobs (with next run times) as a JSON response."""
    from rq_scheduler import Scheduler
    import redis
    conn = redis.from_url(app.config['REDIS_URL'])
    scheduler = Scheduler(connection=conn)
    jobs_with_times = list(scheduler.get_jobs(with_times=True))
    response = json.dumps({
        "status": "ok",
        "count": len(jobs_with_times),
        # Merge each job's attributes with its next run time.  dict(mapping,
        # **kwargs) replaces the Python-2-only items() + items() concatenation,
        # which raises TypeError on Python 3.
        "jobs": [dict(job.__dict__, next_work=when)
                 for job, when in jobs_with_times],
    }, default=dthandler)
    return Response(response, mimetype='application/json')
Ejemplo n.º 21
0
Archivo: job.py Proyecto: pathcl/qpanel
def exists_job_onqueue(queuename, when, hour):
    """
        Return True when a reset_stats_queue job with these exact
        (queuename, when, hour) arguments is already scheduled.
    """
    scheduler = Scheduler(connection=Redis())
    for job in scheduler.get_jobs():
        if 'reset_stats_queue' not in job.func_name:
            continue
        job_args = job.args
        if (queuename == job_args[0] and when == job_args[1]
                and hour == job_args[2]):
            return True
    return False
Ejemplo n.º 22
0
def exists_job_onqueue(queuename, when, hour):
    """
        Check whether a reset_stats_queue job matching the given
        queue name, day and hour is already on the scheduler.
    """
    scheduler = Scheduler(connection=Redis())
    for scheduled in scheduler.get_jobs():
        if 'reset_stats_queue' in scheduled.func_name:
            args = scheduled.args
            same = (queuename == args[0] and when == args[1]
                    and hour == args[2])
            if same:
                return True
    return False
Ejemplo n.º 23
0
def kill_schedule(channel, verbose=True):
    """Cancel every job scheduled on *channel*; return True on success."""
    try:
        scheduler = Scheduler(channel, connection=RCONN)
        for pair in scheduler.get_jobs(with_times=True):
            print("job", pair)
            scheduler.cancel(pair[0].id)
        if verbose:
            print("All Jobs Killed")
        return True
    except Exception as e:
        # Best-effort: report the failure rather than propagate it.
        print("Errors in killing jobs", e)
        return False
def view_schedule():
    """Return a list of dicts describing each scheduled device on/off job."""
    # Get a scheduler for the "default" queue
    scheduler = Scheduler(connection=Redis())
    data_list = []
    for job, scheduled_time in scheduler.get_jobs(with_times=True):
        data_list.append({
            "schedule_id": job._id,
            "time": scheduled_time,
            "device_id": job.args[0],
            "state": job.args[1],
        })
    return data_list
Ejemplo n.º 25
0
 def get_context_data(self, **kwargs):
     """Build the RQ status context: queues, workers and — when
     rq_scheduler is importable — per-queue scheduled-job counts."""
     ctx = super(Stats, self).get_context_data(**kwargs)
     ctx.update({
         'queues': Queue.all(connection=self.connection),
         'workers': Worker.all(connection=self.connection),
         'title': 'RQ Status',
     })
     if Scheduler:
         scheduler = Scheduler(self.connection)
         get_queue = lambda job: job.origin
         # groupby requires its input sorted by the same key.
         all_jobs = sorted(scheduler.get_jobs(), key=get_queue)
         ctx['scheduler'] = scheduler
         ctx['scheduled_queues'] = [
             {'name': queue, 'job_count': len(list(jobs))}
             for queue, jobs in groupby(all_jobs, get_queue)]
     return ctx
Ejemplo n.º 26
0
def start_jobs():
    """
        Schedule enqueue_reset_stats to run every 60 seconds unless it
        is already on the scheduler.
    """
    scheduler = Scheduler(connection=Redis())
    already_running = False
    for job in scheduler.get_jobs():
        if 'enqueue_reset_stats' in job.func_name:
            already_running = True
            break

    if not already_running:
        scheduler.schedule(scheduled_time=datetime.datetime.utcnow(),
                           func=enqueue_reset_stats,
                           interval=60)
Ejemplo n.º 27
0
def test_job_method(**kwargs):
    """Smoke-test job (Python 2 syntax): empty the failed queue, list
    the scheduled jobs and echo the kwargs this job was invoked with."""

    print " ********* start jobs execution ".encode("utf-8")
    frappe.init('')
    conn = get_redis_conn()

    # Drain any previously failed jobs before inspecting the scheduler.
    print "** empty failed queue"
    q = get_failed_queue(connection=conn)
    e = q.empty()

    print "** get scheduled jobs"
    scheduler = Scheduler(connection=conn)
    print "jobs scheduled : {0}".format(scheduler.get_jobs())

    print "** get kwards args"
    print "kwargs = {0}".format(kwargs)
    print " ********* end jobs execution ".encode("utf-8")
Ejemplo n.º 28
0
    def get_context_data(self, **kwargs):
        """Add the queue and its scheduled jobs to the template context.

        Raises Http404 when rq_scheduler is not installed.
        """
        ctx = super(SchedulerDetails, self).get_context_data(**kwargs)
        if Scheduler is None:
            # rq_scheduler is not installed
            raise Http404
        scheduler = Scheduler(self.connection)
        queue = Queue(self.kwargs['queue'], connection=self.connection)
        # Keep only jobs originating from this queue.  The Python-2-only
        # tuple-parameter lambda was replaced with explicit indexing so the
        # code is also valid syntax on Python 3.
        jobs = filter(lambda pair: pair[0].origin == queue.name,
                      scheduler.get_jobs(with_times=True))

        ctx.update({
            'queue': queue,
            'jobs': [serialize_scheduled_job(job, next_run)
                     for job, next_run in jobs],
            'title': "Jobs scheduled on '%s' queue" % queue.name,
        })
        return ctx
Ejemplo n.º 29
0
 def get_context_data(self, **kwargs):
     """Build the RQ status context: queues, workers and — when
     rq_scheduler is importable — per-queue scheduled-job counts."""
     ctx = super(Stats, self).get_context_data(**kwargs)
     ctx.update({
         'queues': Queue.all(connection=self.connection),
         'workers': Worker.all(connection=self.connection),
         'has_permission': True,
         'title': 'RQ Status',
     })
     if Scheduler:
         scheduler = Scheduler(self.connection)
         get_queue = lambda job: job.origin
         # groupby requires its input sorted by the same key.
         all_jobs = sorted(scheduler.get_jobs(), key=get_queue)
         ctx['scheduler'] = scheduler
         ctx['scheduled_queues'] = [{
             'name': queue,
             'job_count': len(list(jobs))
         } for queue, jobs in groupby(all_jobs, get_queue)]
     return ctx
Ejemplo n.º 30
0
def scheduled_jobs():
    """Return all scheduled jobs (with next run times) as a JSON response."""
    from rq_scheduler import Scheduler
    import redis
    conn = redis.from_url(app.config['REDIS_URL'])
    scheduler = Scheduler(connection=conn)
    scheduled_jobs = list(scheduler.get_jobs(with_times=True))
    response = json.dumps(
        {
            "status": "ok",
            "count": len(scheduled_jobs),
            # Merge each job's attributes with its next run time.
            # dict(mapping, **kwargs) replaces the Python-2-only
            # items() + items() concatenation (TypeError on Python 3).
            "jobs": [dict(job.__dict__, next_work=when)
                     for job, when in scheduled_jobs],
        },
        default=dthandler)
    return Response(response, mimetype='application/json')
Ejemplo n.º 31
0
def remove_jobs_not_config():
    """
        Delete scheduled reset_stats_queue jobs no longer present in
        the config.  Prevents a stale job from lingering after its
        config entry is modified or deleted.

        TODO: Maybe this could reload by notified in config.ini change
    """
    scheduler = Scheduler(connection=Redis())
    for job in scheduler.get_jobs():
        if 'reset_stats_queue' not in job.func_name:
            continue
        # reset_stats_queue jobs carry (queuename, when, hour) as args.
        queuename = job.args[0]
        when = job.args[1]
        hour = job.args[2]

        if not exists_job_onconfig(queuename, when, hour):
            job.delete()
Ejemplo n.º 32
0
class QueueScheduler:
    """Moves due scheduled jobs onto their RQ queue, guarded by a lock."""

    def __init__(self, queue_name, app):
        self.app = app
        self.logger = self.app.logger.bind(queue_name=queue_name)
        self.scheduler = Scheduler(queue_name=queue_name, connection=app.redis)

    def move_jobs(self):
        """Enqueue due jobs while holding the scheduler lock; skip otherwise."""
        if not self.scheduler.acquire_lock():
            self.logger.debug(
                "Lock could not be acquired. Enqueuing scheduled jobs skipped. Trying again next cycle."
            )
            return
        try:
            scheduled = self.scheduler.get_jobs()
            self.logger.debug("Lock acquired. Enqueuing scheduled jobs...",
                              jobs=scheduled)
            self.scheduler.enqueue_jobs()
        finally:
            # Always release the lock, even if enqueueing fails.
            self.scheduler.remove_lock()
Ejemplo n.º 33
0
def remove_jobs_not_config():
    """
        Remove jobs on queue but not present on config.
        Prevent when in job for reset a queue stats is scheduled but
        after your config is modified or deleted
    """
    scheduler = Scheduler(connection=Redis())
    queue_for_reset = config.QPanelConfig().queues_for_reset_stats()
    jobs = scheduler.get_jobs()
    for job in jobs:
        if 'reset_stats_queue' not in job.func_name:
            continue
        # Keep the job if any configured queue matches its scheduled
        # (when, hour) arguments.
        delete = True
        for qr in queue_for_reset:
            if (queue_for_reset[qr]['when'] == job.args[1]
                    and queue_for_reset[qr]['hour'] == job.args[2]):
                delete = False
        # BUG FIX: decide only after every config entry has been checked;
        # the original deleted inside the loop, so a job could be removed
        # before a later entry had a chance to match.  The tautological
        # 'if qr in queue_for_reset' guard was dropped as well.
        if delete:
            job.delete()
Ejemplo n.º 34
0
Archivo: job.py Proyecto: pathcl/qpanel
def start_jobs():
    """
        Schedule enqueue_reset_stats (every 60 seconds) unless a job for
        it is already present on the scheduler.
    """
    scheduler = Scheduler(connection=Redis())
    found = any('enqueue_reset_stats' in job.func_name
                for job in scheduler.get_jobs())

    if found is False:
        scheduler.schedule(
            scheduled_time=datetime.datetime.utcnow(),
            func=enqueue_reset_stats,
            interval=60
        )
Ejemplo n.º 35
0
Archivo: job.py Proyecto: pathcl/qpanel
def remove_jobs_not_config():
    """
        Remove jobs on queue but not present on config.
        Prevent when in job for reset a queue stats is scheduled but
        after your config is modified or deleted
    """
    scheduler = Scheduler(connection=Redis())
    queue_for_reset = config.QPanelConfig().queues_for_reset_stats()
    jobs = scheduler.get_jobs()
    for job in jobs:
        if 'reset_stats_queue' not in job.func_name:
            continue
        # Keep the job if any configured queue matches its scheduled
        # (when, hour) arguments.  (The unused local 'q = job.args[0]'
        # from the original was removed.)
        delete = True
        for qr in queue_for_reset:
            if (queue_for_reset[qr]['when'] == job.args[1] and
                    queue_for_reset[qr]['hour'] == job.args[2]):
                delete = False
        # BUG FIX: decide only after every config entry has been checked;
        # the original deleted inside the loop, so a job could be removed
        # before a later entry had a chance to match.
        if delete:
            job.delete()
Ejemplo n.º 36
0
def start_scheduler(redis_url, redis_password=None, queue_name='job_scheduler_queue'):
    """Reset the queue, drop stale jobs and register the log_review cron.

    Returns the configured Scheduler instance.
    """
    queue = redis_queue(redis_url, redis_password, queue_name)
    scheduler = Scheduler(queue_name=queue.name, connection=queue.connection)

    queue.empty()
    for old_job in scheduler.get_jobs():
        scheduler.cancel(old_job)
        logger.info(f"Removed old job {old_job} from scheduler.")

    # add jobs to scheduler
    cron_job = scheduler.cron(
        cron_string="* * * * *",  # once a minute
        func=log_review,
        args=[datetime.now(), choice(['Alice', 'Bob', 'Carol', 'Dave'])],
        queue_name=queue.name,
        repeat=None
    )
    logger.info(f"Added job {cron_job}")

    return scheduler
Ejemplo n.º 37
0
def list_jobs(queue_name, page):
    """Return one page of scheduled jobs for *queue_name* plus pagination."""
    current_page = int(page)

    scheduler = Scheduler(queue_name)
    jobs = scheduler.get_jobs(with_times=True)
    if queue_name:
        # Keep only (job, time) pairs that originate from this queue.
        jobs = [pair for pair in jobs if pair[0].origin == queue_name]

    per_page = 5
    total_items = len(jobs)
    window = pagination_window(total_items, current_page, per_page)
    pages_in_window = [
        dict(number=p, url=url_for('.overview', queue_name=queue_name, page=p))
        for p in window
    ]
    last_page = int(ceil(total_items / float(per_page)))

    prev_page = None
    if current_page > 1:
        prev_page = dict(url=url_for(
            '.overview', queue_name=queue_name, page=current_page - 1))

    next_page = None
    if current_page < last_page:
        next_page = dict(url=url_for(
            '.overview', queue_name=queue_name, page=current_page + 1))

    pagination = remove_none_values(
        dict(
            pages_in_window=pages_in_window,
            next_page=next_page,
            prev_page=prev_page
        )
    )

    # Slice out the requested page and serialize it.
    start = (current_page - 1) * per_page
    page_jobs = jobs[start:start + per_page]
    serialized = [serialize_job(job, at) for (job, at) in page_jobs]
    return dict(name=queue_name, jobs=serialized, pagination=pagination)
Ejemplo n.º 38
0
    def test_small_float_interval(self):
        """
        Test that scheduler accepts 'interval' of type float, less than 1 second.
        """
        key = Scheduler.scheduler_key
        lock_key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn,
                              interval=0.1)  # testing interval = 0.1 second
        self.assertEqual(scheduler._interval, 0.1)

        # acquire lock -- the lock key TTL is int(interval) + 10 seconds
        self.assertTrue(scheduler.acquire_lock())
        self.assertIn(lock_key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(lock_key), 10)  # int(0.1) + 10 = 10

        # enqueue a job
        # NOTE(review): the two assertions below read self.scheduler (the
        # fixture scheduler), presumably sharing self.testconn with the
        # local one -- confirm against setUp.
        now = datetime.utcnow()
        job = scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        self.assertEqual(len(list(self.scheduler.get_jobs())), 1)

        # remove the lock so scheduler.run() below can re-acquire it
        scheduler.remove_lock()

        # test that run works with the small floating-point interval
        def send_stop_signal():
            """
            Sleep for 1 second, then send a INT signal to ourself, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)

        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, scheduler.run)
        thread.join()

        # all jobs must have been scheduled during 1 second
        self.assertEqual(len(list(scheduler.get_jobs())), 0)
Ejemplo n.º 39
0
    def test_small_float_interval(self):
        """
        Test that scheduler accepts 'interval' of type float, less than 1 second.
        """
        key = Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=0.1)   # testing interval = 0.1 second
        self.assertEqual(scheduler._interval, 0.1)

        # register birth -- the scheduler key TTL is int(interval) + 10 seconds
        scheduler.register_birth()
        self.assertIn(key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(key), 10)  # int(0.1) + 10 = 10
        self.assertFalse(self.testconn.hexists(key, 'death'))

        # enqueue a job
        # NOTE(review): the two assertions below read self.scheduler (the
        # fixture scheduler), presumably sharing self.testconn with the
        # local one -- confirm against setUp.
        now = datetime.utcnow()
        job = scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        self.assertEqual(len(self.scheduler.get_jobs()), 1)

        # register death so the scheduler no longer appears alive
        scheduler.register_death()

        # test that run works with the small floating-point interval
        def send_stop_signal():
            """
            Sleep for 1 second, then send a INT signal to ourself, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)
        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, scheduler.run)
        thread.join()

        # all jobs must have been scheduled during 1 second
        self.assertEqual(len(scheduler.get_jobs()), 0)
Ejemplo n.º 40
0
def list_jobs(queue_name, page):
    """Return one page of scheduled jobs for *queue_name*.

    Builds a pagination dict (a window of numbered page links plus optional
    prev/next links) and serializes only the jobs that fall on the requested
    page.

    :param queue_name: queue to list; falsy means "all queues"
    :param page: 1-based page number (string or int)
    :returns: dict with keys ``name``, ``jobs`` and ``pagination``
    """
    current_page = int(page)

    scheduler = Scheduler(queue_name)
    jobs = scheduler.get_jobs(with_times=True)
    if queue_name:
        # keep only (job, scheduled_time) pairs whose job originates
        # from the requested queue
        jobs = [pair for pair in jobs if pair[0].origin == queue_name]

    per_page = 5
    total_items = len(jobs)
    pages_numbers_in_window = pagination_window(total_items, current_page,
                                                per_page)
    pages_in_window = [
        dict(number=p, url=url_for('.overview', queue_name=queue_name, page=p))
        for p in pages_numbers_in_window
    ]
    # float() keeps the division true division on Python 2 as well
    last_page = int(ceil(total_items / float(per_page)))

    prev_page = None
    if current_page > 1:
        prev_page = dict(url=url_for(
            '.overview', queue_name=queue_name, page=(current_page - 1)))

    next_page = None
    if current_page < last_page:
        next_page = dict(url=url_for(
            '.overview', queue_name=queue_name, page=(current_page + 1)))

    pagination = remove_none_values(
        dict(pages_in_window=pages_in_window,
             next_page=next_page,
             prev_page=prev_page))

    # slice out the requested page and serialize it
    offset = (current_page - 1) * per_page
    job_page = jobs[offset:offset + per_page]
    job_page = [serialize_job(job, at) for (job, at) in job_page]
    return dict(name=queue_name, jobs=job_page, pagination=pagination)
Ejemplo n.º 41
0
class RqClient:
    """Thin Celery-like facade over an RQ queue and rq-scheduler sharing one
    Redis connection."""

    def __init__(self, conf: RqConfig, prefix: str = ''):
        self.redis_conn = Redis(host=conf.HOST, port=conf.PORT, db=conf.DB)
        self.queue = Queue(connection=self.redis_conn)
        # dotted-path prefix prepended to every task name on enqueue
        self.prefix = prefix
        self.scheduler = Scheduler(connection=self.redis_conn, queue=self.queue)
        self.scheduler_conf_path = conf.SCHEDULER_CONF_PATH
        self.control = Control(self.redis_conn)

    def init_scheduler(self):
        """Replace all currently scheduled tasks with the ones defined in the
        JSON config file (a list of ``{"schedule", "task", ["kwargs"]}``)."""
        # remove old scheduled tasks
        for job in self.scheduler.get_jobs():
            self.scheduler.cancel(job)

        # create new tasks from config file
        if self.scheduler_conf_path:
            with open(self.scheduler_conf_path) as f:
                for entry in json.load(f):
                    self.scheduler.cron(
                        entry['schedule'],
                        f'{self.prefix}.{entry["task"]}',
                        kwargs=entry.get('kwargs')  # optional per-task kwargs
                    )

    def send_task(self, name, args=None, time_limit=None, soft_time_limit=None):
        """Enqueue task *name* and return a ResultWrapper, or None on failure.

        NOTE(review): ``soft_time_limit`` is accepted for API compatibility
        but is currently unused.
        """
        try:
            job = self.queue.enqueue(f'{self.prefix}.{name}', ttl=time_limit, args=args)
            return ResultWrapper(job)
        except Exception as ex:
            # best-effort: enqueue failures are logged and the caller gets None
            logging.getLogger(__name__).error(ex)

    def AsyncResult(self, ident):
        """Fetch an existing job by id; return None if it no longer exists."""
        try:
            return ResultWrapper(Job.fetch(ident, connection=self.redis_conn))
        except NoSuchJobError:
            logging.getLogger(__name__).warning(f'Job {ident} not found')
            return None
Ejemplo n.º 42
0
def get_scheduled_tasks(request):
    """Django view: return all jobs scheduled on the "default" queue as JSON.

    Each entry carries the job id, description, periodic interval (0 when the
    job is not periodic), args and queue name.
    """
    from rq import use_connection
    from rq_scheduler import Scheduler
    import json

    # NOTE(review): use_connection() is deprecated in newer versions of rq —
    # confirm the pinned rq version before upgrading.
    use_connection()  # Use RQ's default Redis connection
    scheduler = Scheduler()  # Get a scheduler for the "default" queue
    list_of_job_instances = scheduler.get_jobs()

    jobdata = []
    for job in list_of_job_instances:
        jobdata.append({
            'job_id': job._id,
            'task': job.description,
            'period': job.meta.get("interval", 0),  # 0 = not periodic
            'args': job.args,
            'queue': "default"
        })

    data = json.dumps(jobdata)
    return HttpResponse(data, content_type='application/json')
Ejemplo n.º 43
0
class TestScheduler(RQTestCase):
    """Integration tests for rq_scheduler's Scheduler, run against the
    throwaway Redis connection (``self.testconn``) provided by RQTestCase."""

    def setUp(self):
        super(TestScheduler, self).setUp()
        self.scheduler = Scheduler(connection=self.testconn)

    def test_birth_and_death_registration(self):
        """
        When scheduler registers it's birth, besides creating a key, it should
        also set an expiry that's a few seconds longer than it's polling
        interval so it automatically expires if scheduler is unexpectedly
        terminated.
        """
        key = Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=20)
        scheduler.register_birth()
        self.assertIn(key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(key), 30)
        self.assertFalse(self.testconn.hexists(key, 'death'))
        self.assertRaises(ValueError, scheduler.register_birth)
        scheduler.register_death()
        self.assertTrue(self.testconn.hexists(key, 'death'))

    def test_create_job(self):
        """
        Ensure that jobs are created properly.
        """
        job = self.scheduler._create_job(say_hello, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job, job_from_queue)
        self.assertEqual(job_from_queue.func, say_hello)

    def test_create_job_with_ttl(self):
        """
        Ensure that TTL is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, ttl=2, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(2, job_from_queue.ttl)

    def test_create_job_with_id(self):
        """
        Ensure that ID is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, id='id test', args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('id test', job_from_queue.id)

    def test_create_job_with_description(self):
        """
        Ensure that description is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, description='description', args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('description', job_from_queue.description)

    def test_job_not_persisted_if_commit_false(self):
        """
        Ensure jobs are only saved to Redis if commit=True.
        """
        job = self.scheduler._create_job(say_hello, commit=False)
        self.assertEqual(self.testconn.hgetall(job.key), {})

    def test_create_scheduled_job(self):
        """
        Ensure that scheduled jobs are put in the scheduler queue with the right score
        """
        scheduled_time = datetime.utcnow()
        job = self.scheduler.enqueue_at(scheduled_time, say_hello)
        self.assertEqual(job, Job.fetch(job.id, connection=self.testconn))
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(scheduled_time))

    def test_enqueue_in(self):
        """
        Ensure that jobs have the right scheduled time.
        """
        right_now = datetime.utcnow()
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertIn(job.id,
                      tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))
        time_delta = timedelta(hours=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))

    def test_get_jobs(self):
        """
        Ensure get_jobs() returns all jobs until the specified time.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs(now))
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertIn(job, self.scheduler.get_jobs(timedelta(hours=1, seconds=1)))
        self.assertIn(job, [j[0] for j in self.scheduler.get_jobs(with_times=True)])
        self.assertIsInstance(self.scheduler.get_jobs(with_times=True)[0][1], datetime)
        self.assertNotIn(job, self.scheduler.get_jobs(timedelta(minutes=59, seconds=59)))

    def test_get_jobs_to_queue(self):
        """
        Ensure that jobs scheduled the future are not queued.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertNotIn(job, self.scheduler.get_jobs_to_queue())

    def test_enqueue_job(self):
        """
        When scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        - Queue is recognized by rq's Queue.all()
        """
        now = datetime.utcnow()
        queue_name = 'foo'
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)

        job = scheduler.enqueue_at(now, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job, tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10)))
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)
        self.assertIn(queue, Queue.all())

    def test_job_membership(self):
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler)
        self.assertIn(job.id, self.scheduler)
        self.assertNotIn("non-existing-job-id", self.scheduler)

    def test_cancel_scheduled_job(self):
        """
        When scheduled job is canceled, make sure:
        - Job is removed from the sorted set of scheduled jobs
        """
        # schedule a job to be enqueued one minute from now
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        # cancel the scheduled job and check that it's gone from the set
        self.scheduler.cancel(job)
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_change_execution_time(self):
        """
        Ensure ``change_execution_time`` is called, ensure that job's score is updated
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
        new_date = datetime(2010, 1, 1)
        self.scheduler.change_execution_time(job, new_date)
        self.assertEqual(to_unix(new_date),
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))
        self.scheduler.cancel(job)
        self.assertRaises(ValueError, self.scheduler.change_execution_time, job, new_date)

    def test_args_kwargs_are_passed_correctly(self):
        """
        Ensure that arguments and keyword arguments are properly saved to jobs.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_in(time_delta, simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

    def test_enqueue_is_deprecated(self):
        """
        Ensure .enqueue() throws a DeprecationWarning
        """
        with warnings.catch_warnings(record=True) as w:
            # Enable all warnings
            warnings.simplefilter("always")
            self.scheduler.enqueue(datetime.utcnow(), say_hello)
            self.assertEqual(1, len(w))
            self.assertEqual(w[0].category, DeprecationWarning)

    def test_enqueue_periodic(self):
        """
        Ensure .enqueue_periodic() throws a DeprecationWarning
        """
        with warnings.catch_warnings(record=True) as w:
            # Enable all warnings
            warnings.simplefilter("always")
            self.scheduler.enqueue_periodic(datetime.utcnow(), 1, None, say_hello)
            self.assertEqual(1, len(w))
            self.assertEqual(w[0].category, DeprecationWarning)

    def test_interval_and_repeat_persisted_correctly(self):
        """
        Ensure that interval and repeat attributes get correctly saved in Redis.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=10, repeat=11)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['interval'], 10)
        self.assertEqual(job_from_queue.meta['repeat'], 11)

    def test_crontab_persisted_correctly(self):
        """
        Ensure that crontab attribute gets correctly saved in Redis.
        """
        # create a job that runs one minute past each whole hour
        job = self.scheduler.cron("1 * * * *", say_hello)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['cron_string'], "1 * * * *")

        # get the scheduled_time and convert it to a datetime object
        unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)
        datetime_time = from_unix(unix_time)

        # check that minute=1, seconds=0, and is within an hour
        assert datetime_time.minute == 1
        assert datetime_time.second == 0
        assert datetime_time - datetime.utcnow() < timedelta(hours=1)

    def test_crontab_sets_timeout(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom timeout.
        """
        timeout = 13
        job = self.scheduler.cron("1 * * * *", say_hello, timeout=timeout)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.timeout, timeout)

    def test_crontab_sets_id(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom id
        """
        job_id = "hello-job-id"
        job = self.scheduler.cron("1 * * * *", say_hello, id=job_id)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_id, job_from_queue.id)

    def test_crontab_sets_default_result_ttl(self):
        """
        Ensure that a job scheduled via crontab gets proper default
        result_ttl (-1) periodic tasks.
        """
        job = self.scheduler.cron("1 * * * *", say_hello)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(-1, job_from_queue.result_ttl)

    def test_crontab_sets_description(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom description
        """
        description = 'test description'
        job = self.scheduler.cron("1 * * * *", say_hello, description=description)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(description, job_from_queue.description)

    def test_repeat_without_interval_raises_error(self):
        # Ensure that an error is raised if repeat is specified without interval
        def create_job():
            self.scheduler.schedule(datetime.utcnow(), say_hello, repeat=11)
        self.assertRaises(ValueError, create_job)

    def test_job_with_intervals_get_rescheduled(self):
        """
        Ensure jobs with interval attribute are put back in the scheduler
        """
        time_now = datetime.utcnow()
        interval = 10
        job = self.scheduler.schedule(time_now, say_hello, interval=interval)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(time_now) + interval)

        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, None, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(time_now) + interval)

    def test_job_with_crontab_get_rescheduled(self):
        # Create a job with a cronjob_string
        job = self.scheduler.cron("1 * * * *", say_hello)

        # current unix_time
        old_next_scheduled_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)

        # change crontab
        job.meta['cron_string'] = "2 * * * *"

        # enqueue the job
        self.scheduler.enqueue_job(job)

        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # check that next scheduled time has changed
        self.assertNotEqual(old_next_scheduled_time,
                            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))

        # check that new next scheduled time is set correctly
        expected_next_scheduled_time = to_unix(get_next_scheduled_time("2 * * * *"))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         expected_next_scheduled_time)

    def test_job_with_repeat(self):
        """
        Ensure jobs with repeat attribute are put back in the scheduler
        X (repeat) number of times
        """
        time_now = datetime.utcnow()
        interval = 10
        # If job is repeated once, the job shouldn't be put back in the queue
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=1)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=2)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        time_now = datetime.utcnow()
        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, 1, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.enqueue_periodic(time_now, interval, 2, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_missing_jobs_removed_from_scheduler(self):
        """
        Ensure jobs that don't exist when queued are removed from the scheduler.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello)
        job.cancel()
        self.scheduler.get_jobs_to_queue()
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_periodic_jobs_sets_result_ttl(self):
        """
        Ensure periodic jobs set result_ttl to infinite.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        # assert on the persisted job so we verify what was saved to Redis
        # (the original asserted on the local `job`, leaving the fetch unused)
        self.assertEqual(job_from_queue.result_ttl, -1)

    def test_periodic_jobs_sets_ttl(self):
        """
        Ensure periodic jobs sets correctly ttl.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, ttl=4)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        # assert on the persisted job so we verify what was saved to Redis
        self.assertEqual(job_from_queue.ttl, 4)

    def test_periodic_job_sets_id(self):
        """
        Ensure that ID is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, id='id test')
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        # assert on the persisted job so we verify what was saved to Redis
        self.assertEqual('id test', job_from_queue.id)

    def test_periodic_job_sets_description(self):
        """
        Ensure that description is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, description='description')
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        # assert on the persisted job so we verify what was saved to Redis
        self.assertEqual('description', job_from_queue.description)

    def test_run(self):
        """
        Check correct signal handling in Scheduler.run().
        """
        def send_stop_signal():
            """
            Sleep for 1 second, then send a INT signal to ourself, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)
        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, self.scheduler.run)
        thread.join()

    def test_scheduler_w_o_explicit_connection(self):
        """
        Ensure instantiating Scheduler w/o explicit connection works.
        """
        s = Scheduler()
        self.assertEqual(s.connection, self.testconn)

    def test_small_float_interval(self):
        """
        Test that scheduler accepts 'interval' of type float, less than 1 second.
        """
        key = Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=0.1)   # testing interval = 0.1 second
        self.assertEqual(scheduler._interval, 0.1)

        # register birth
        scheduler.register_birth()
        self.assertIn(key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(key), 10)  # int(0.1) + 10 = 10
        self.assertFalse(self.testconn.hexists(key, 'death'))

        # enqueue a job and assert through the scheduler that created it
        # (was self.scheduler, which only worked because both schedulers
        # share the same scheduled_jobs_key)
        now = datetime.utcnow()
        job = scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, scheduler.get_jobs_to_queue())
        self.assertEqual(len(scheduler.get_jobs()), 1)

        # register death
        scheduler.register_death()

        # test that run works with the small floating-point interval
        def send_stop_signal():
            """
            Sleep for 1 second, then send a INT signal to ourself, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)
        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, scheduler.run)
        thread.join()

        # all jobs must have been scheduled during 1 second
        self.assertEqual(len(scheduler.get_jobs()), 0)
Ejemplo n.º 44
0
class TestScheduler(RQTestCase):

    def setUp(self):
        super(TestScheduler, self).setUp()
        self.scheduler = Scheduler(connection=self.testconn)
    
    def test_birth_and_death_registration(self):
        """
        When scheduler registers it's birth, besides creating a key, it should
        also set an expiry that's a few seconds longer than it's polling
        interval so it automatically expires if scheduler is unexpectedly 
        terminated.
        """
        key = Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=20)
        scheduler.register_birth()
        self.assertIn(key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(key), 30)
        self.assertFalse(self.testconn.hexists(key, 'death'))
        self.assertRaises(ValueError, scheduler.register_birth)
        scheduler.register_death()
        self.assertTrue(self.testconn.hexists(key, 'death'))

    def test_create_job(self):
        """
        Ensure that jobs are created properly.
        """
        job = self.scheduler._create_job(say_hello, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job, job_from_queue)
        self.assertEqual(job_from_queue.func, say_hello)

    def test_job_not_persisted_if_commit_false(self):
        """
        Ensure jobs are only saved to Redis if commit=True.
        """
        job = self.scheduler._create_job(say_hello, commit=False)
        self.assertEqual(self.testconn.hgetall(job.key), {})

    def test_create_scheduled_job(self):
        """
        Ensure that scheduled jobs are put in the scheduler queue with the right score
        """
        scheduled_time = datetime.utcnow()
        job = self.scheduler.enqueue_at(scheduled_time, say_hello)
        self.assertEqual(job, Job.fetch(job.id, connection=self.testconn))
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(scheduled_time))

    def test_enqueue_in(self):
        """
        Ensure that jobs have the right scheduled time.
        """
        right_now = datetime.utcnow()
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertIn(job.id,
                      tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))
        time_delta = timedelta(hours=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))

    def test_get_jobs(self):
        """
        Ensure get_jobs() returns all jobs until the specified time.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs(now))
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertIn(job, self.scheduler.get_jobs(timedelta(hours=1, seconds=1)))
        self.assertIn(job, [j[0] for j in self.scheduler.get_jobs(with_times=True)])
        self.assertIsInstance(self.scheduler.get_jobs(with_times=True)[0][1], datetime)
        self.assertNotIn(job, self.scheduler.get_jobs(timedelta(minutes=59, seconds=59)))
    
    def test_get_jobs_to_queue(self):
        """
        Ensure that jobs scheduled the future are not queued.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertNotIn(job, self.scheduler.get_jobs_to_queue())

    def test_enqueue_job(self):
        """
        When scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        """
        now = datetime.utcnow()
        queue_name = 'foo'
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)

        job = scheduler.enqueue_at(now, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job, tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10)))
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)

    def test_job_membership(self):
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler)
        self.assertIn(job.id, self.scheduler)
        self.assertNotIn("non-existing-job-id", self.scheduler)

    def test_cancel_scheduled_job(self):
        """
        When scheduled job is canceled, make sure:
        - Job is removed from the sorted set of scheduled jobs
        """
        # schedule a job to be enqueued one minute from now
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        # cancel the scheduled job and check that it's gone from the set
        self.scheduler.cancel(job)
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_change_execution_time(self):
        """
        Ensure ``change_execution_time`` is called, ensure that job's score is updated
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
        new_date = datetime(2010, 1, 1)
        self.scheduler.change_execution_time(job, new_date)
        self.assertEqual(to_unix(new_date),
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))
        self.scheduler.cancel(job)
        self.assertRaises(ValueError, self.scheduler.change_execution_time, job, new_date)

    def test_args_kwargs_are_passed_correctly(self):
        """
        Positional and keyword arguments must be saved on the job for
        both ``enqueue_at`` and ``enqueue_in``.
        """
        schedulers = [
            lambda *a, **kw: self.scheduler.enqueue_at(
                datetime.utcnow(), simple_addition, *a, **kw),
            lambda *a, **kw: self.scheduler.enqueue_in(
                timedelta(minutes=1), simple_addition, *a, **kw),
        ]
        for schedule in schedulers:
            # All positional.
            job = schedule(1, 1, 1)
            self.assertEqual(job.args, (1, 1, 1))
            # All keyword, given out of order.
            job = schedule(z=1, y=1, x=1)
            self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
            # Mixed positional and keyword.
            job = schedule(1, z=1, y=1)
            self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
            self.assertEqual(job.args, (1,))

    def test_enqueue_is_deprecated(self):
        """
        Ensure .enqueue() throws a DeprecationWarning.
        """
        with warnings.catch_warnings(record=True) as w:
            # Enable all warnings so the DeprecationWarning is recorded.
            warnings.simplefilter("always")
            # Return value is irrelevant here (previously bound to an
            # unused local); only the emitted warning matters.
            self.scheduler.enqueue(datetime.utcnow(), say_hello)
            self.assertEqual(1, len(w))
            self.assertEqual(w[0].category, DeprecationWarning)

    def test_enqueue_periodic(self):
        """
        Ensure .enqueue_periodic() throws a DeprecationWarning.
        """
        with warnings.catch_warnings(record=True) as w:
            # Enable all warnings so the DeprecationWarning is recorded.
            warnings.simplefilter("always")
            # Return value is irrelevant here (previously bound to an
            # unused local); only the emitted warning matters.
            self.scheduler.enqueue_periodic(datetime.utcnow(), 1, None, say_hello)
            self.assertEqual(1, len(w))
            self.assertEqual(w[0].category, DeprecationWarning)

    def test_interval_and_repeat_persisted_correctly(self):
        """
        The ``interval`` and ``repeat`` attributes must survive a round
        trip through Redis via the job's meta dict.
        """
        scheduled = self.scheduler.schedule(datetime.utcnow(), say_hello,
                                            interval=10, repeat=11)
        persisted = Job.fetch(scheduled.id, connection=self.testconn)
        self.assertEqual(10, persisted.meta['interval'])
        self.assertEqual(11, persisted.meta['repeat'])

    def test_repeat_without_interval_raises_error(self):
        """Specifying ``repeat`` without ``interval`` must raise ValueError."""
        with self.assertRaises(ValueError):
            self.scheduler.schedule(datetime.utcnow(), say_hello, repeat=11)

    def test_job_with_intervals_get_rescheduled(self):
        """
        Ensure jobs with interval attribute are put back in the scheduler
        """
        time_now = datetime.utcnow()
        interval = 10
        job = self.scheduler.schedule(time_now, say_hello, interval=interval)
        self.scheduler.enqueue_job(job)
        # After being enqueued, an interval job must reappear in the
        # scheduled set...
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        # ...with its score pushed forward by exactly one interval.
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(time_now) + interval)

        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, None, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(time_now) + interval)

    def test_job_with_repeat(self):
        """
        Ensure jobs with repeat attribute are put back in the scheduler
        X (repeat) number of times
        """
        time_now = datetime.utcnow()
        interval = 10
        # If job is repeated once, the job shouldn't be put back in the queue
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=1)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=2)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        # The second enqueue consumes the last remaining repeat, so the
        # job must now disappear from the schedule.
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        time_now = datetime.utcnow()
        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, 1, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.enqueue_periodic(time_now, interval, 2, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_missing_jobs_removed_from_scheduler(self):
        """
        Jobs that no longer exist when due must be dropped from the
        schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello)
        # Delete the underlying job, then trigger a sweep of due jobs.
        job.cancel()
        self.scheduler.get_jobs_to_queue()
        scheduled_ids = tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertNotIn(job.id, scheduled_ids)

    def test_periodic_jobs_sets_ttl(self):
        """
        Ensure periodic jobs set result_ttl to infinite.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        # Assert on the persisted copy, not the in-memory object: the old
        # code fetched job_from_queue but then never used it, so it never
        # proved result_ttl=-1 (keep forever) actually reached Redis.
        self.assertEqual(job_from_queue.result_ttl, -1)

    def test_run(self):
        """
        Check correct signal handling in Scheduler.run().
        """
        def send_stop_signal():
            """
            Sleep for 1 second, then send a INT signal to ourself, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)
        thread = Thread(target=send_stop_signal)
        thread.start()
        # run() blocks until SIGINT arrives; its handler is expected to
        # exit via SystemExit rather than let KeyboardInterrupt propagate.
        self.assertRaises(SystemExit, self.scheduler.run)
        thread.join()

    def test_scheduler_w_o_explicit_connection(self):
        """
        A Scheduler created without a connection must fall back to the
        current default RQ connection.
        """
        implicit = Scheduler()
        self.assertEqual(self.testconn, implicit.connection)

    def test_no_functions_from__main__module(self):
        """
        Functions defined in the ``__main__`` module must be rejected,
        since workers could never import them.
        """
        def dummy():
            return 1
        # Pretend the function was defined in __main__.
        dummy.__module__ = "__main__"
        with self.assertRaises(ValueError):
            self.scheduler._create_job(dummy)
Ejemplo n.º 45
0
def get_statistics():
    """
    Gather a JSON-serializable snapshot of every configured queue, the
    workers serving them, and (when rq_scheduler is installed) the
    scheduled jobs.

    Returns a dict with keys 'queues', 'workers', 'scheduler_installed',
    'scheduler_running' and 'scheduled_jobs'.
    """
    queues = []
    workers = []
    workers_collections = collect_workers_by_connection(QUEUES_LIST)
    for index, config in enumerate(QUEUES_LIST):

        queue = get_queue_by_index(index)
        connection = queue.connection
        connection_kwargs = connection.connection_pool.connection_kwargs

        # Raw access to the first item from left of the redis list.
        # This might not be accurate since new job can be added from the left
        # with `at_front` parameters.
        # Ideally rq should supports Queue.oldest_job
        last_job_id = connection.lindex(queue.key, 0)
        last_job = queue.fetch_job(last_job_id.decode('utf-8')) if last_job_id else None
        if last_job:
            oldest_job_timestamp = to_localtime(last_job.enqueued_at)\
                .strftime('%Y-%m-%d, %H:%M:%S')
        else:
            oldest_job_timestamp = "-"

        # parser_class is not needed and not JSON serializable.  pop() with
        # a default replaces the old `del(...)` wrapped in try/except
        # KeyError — one idiomatic call, same behavior.
        connection_kwargs.pop('parser_class', None)

        queue_data = {
            'name': queue.name,
            'jobs': queue.count,
            'oldest_job_timestamp': oldest_job_timestamp,
            'index': index,
            'connection_kwargs': connection_kwargs
        }

        if queue.name == 'failed':
            # The failed queue has no workers or job registries of its own.
            queue_data['workers'] = '-'
            queue_data['finished_jobs'] = '-'
            queue_data['started_jobs'] = '-'
            queue_data['deferred_jobs'] = '-'

        else:
            connection = get_connection(queue.name)
            all_workers = get_all_workers_by_configuration(
                config['connection_config'],
                workers_collections
            )

            # Accumulate workers not already seen on a previous queue; a
            # set makes the membership test O(1) instead of O(n) per worker.
            seen_workers = {w.name for w in workers}
            workers += [w for w in all_workers if w.name not in seen_workers]

            queue_workers = [worker for worker in all_workers if queue in worker.queues]
            queue_data['workers'] = len(queue_workers)

            finished_job_registry = FinishedJobRegistry(queue.name, connection)
            started_job_registry = StartedJobRegistry(queue.name, connection)
            deferred_job_registry = DeferredJobRegistry(queue.name, connection)
            queue_data['finished_jobs'] = len(finished_job_registry)
            queue_data['started_jobs'] = len(started_job_registry)
            queue_data['deferred_jobs'] = len(deferred_job_registry)

        queues.append(queue_data)

    # TODO: Right now the scheduler can run on multiple queues, but multiple
    # queues can use the same connection. Either need to dedupe connections or
    # split scheduled into its own queue, like failed.
    #
    # TODO: the real solution here is ditch allowing queues to have separate
    # connections - make a single global connection and multiple queues are
    # only separated by name. This will solve the multiple failed queue issue
    # too. But was there a reason to allow multiple connections? Also, this
    # will require some massive doc updates.
    scheduled_jobs = []
    scheduler_running = False
    scheduler_installed = False
    try:
        from rq_scheduler import Scheduler
        scheduler_installed = True
    except ImportError:
        pass
    else:
        connection = get_connection('default')
        scheduler = Scheduler(connection=connection)

        scheduled_jobs = scheduler.get_jobs(with_times=True)

        # TODO: should expose this from rq-scheduler.
        # TODO: this is really per-queue.
        # A live scheduler key without a 'death' field means it is running.
        scheduler_running = (connection.exists(scheduler.scheduler_key) and
            not connection.hexists(scheduler.scheduler_key, 'death'))

    def get_job_graceful(worker):
        # A worker's current job may vanish between listing and fetching;
        # treat that race as "no current job".
        if not worker:
            return None
        try:
            return worker.get_current_job()
        except NoSuchJobError:
            return None

    def job_serializer(job):
        # Flatten an rq Job into a JSON-friendly dict.
        if not job:
            return None
        return {
            'id': job.id,
            'description': job.description,
            'created_at': job.created_at,
            'enqueued_at': job.enqueued_at,
            'status': job.get_status(),
            'func_name': job.func_name,
            'args': job.args,
            'kwargs': job.kwargs,
        }

    def scheduled_job_serializer(job):
        # job is actually tuple of (job, datetime)
        if not job:
            return None
        # TODO: job.origin is the scheduler queue originally used to schedule
        # the job. Need to check if this is how the scheduler actually picks
        # which queue to put the job into.
        return {
            'job': job_serializer(job[0]),
            'runtime': job[1],
            'queue': job[0].origin,
        }

    return {
        'queues': queues,
        'workers': [{
            'name': worker.name,
            'state': worker.get_state(),
            'birth': worker.birth_date,
            'queue_names': worker.queue_names(),
            'job': job_serializer(get_job_graceful(worker)),
        } for worker in list(set(workers))],
        'scheduler_installed': scheduler_installed,
        'scheduler_running': 'running' if scheduler_running else 'stopped',
        'scheduled_jobs': [
            scheduled_job_serializer(job) for job in scheduled_jobs
        ]
    }
Ejemplo n.º 46
0
import os
import redis

# from rq import Queue
from rq_scheduler import Scheduler
from datetime import datetime
from app import loop_script

# Connect to Redis (REDISCLOUD_URL in production, localhost otherwise).
redis_url = os.getenv('REDISCLOUD_URL', 'redis://localhost:6379')
conn = redis.from_url(redis_url)

# q = Queue(connection=conn)
# result = q.enqueue(loop_script)

scheduler = Scheduler(connection=conn)

# Cancel any previously scheduled jobs so only one copy of loop_script
# stays registered.  Iterate directly: get_jobs() may return a generator
# (len() on it raises TypeError), and the old `if len(...) > 0` guard
# queried Redis twice for no benefit.
for job in scheduler.get_jobs():
    scheduler.cancel(job)

# Run loop_script immediately and then every 30 seconds.
scheduler.schedule(scheduled_time=datetime.utcnow(),
                   func=loop_script,
                   interval=30)

print(scheduler.get_jobs())
Ejemplo n.º 47
0
				if setting == setting.upper():
					setattr(self, setting, getattr(mod, setting))

	def __getitem__(self, name): return getattr(self, name)

settings = Settings()

# launch scheduled jobs
import datetime
from brokenpromises.worker     import worker
from rq_scheduler              import Scheduler
from brokenpromises.operations import CollectNext7days, CollectNext2Months, CollectNext2Years, CollectToday, MrClean
import redis
conn           = redis.from_url(settings.REDIS_URL)
scheduler      = Scheduler(connection=conn)
scheduled_jobs = scheduler.get_jobs()
# remove all jobs with interval
for job in scheduled_jobs:
	if "RunAndReplaceIntTheQueuePeriodically" in job.description:
		scheduler.cancel(job)

today = datetime.datetime.now()
# next midnight (00:10, ten minutes past, as a safety margin)
next_midnight = today + datetime.timedelta(days=1)
next_midnight = datetime.datetime(next_midnight.year, next_midnight.month, next_midnight.day, 0, 10)
# first day of next month at 00:10.
# `today.month // 12` is 1 only in December, exactly when the year must
# roll over.  The old `(today.month + 1) / 12` wrongly bumped the year in
# November too, and under Python 3 `/` yields a float, which makes
# datetime() raise TypeError.
year          = today.year + today.month // 12
month         = today.month % 12 + 1
next_month    = datetime.datetime(year, month, 1, 0, 10)
# next new year (Jan 1 at 00:20)
next_year     = datetime.datetime(today.year + 1, 1, 1, 0, 20)
Ejemplo n.º 48
0
class TestScheduler(RQTestCase):

    def setUp(self):
        # Build a scheduler bound to the test Redis connection supplied
        # by the RQTestCase base class.
        super(TestScheduler, self).setUp()
        self.scheduler = Scheduler(connection=self.testconn)

    def test_acquire_lock(self):
        """
        Acquiring the scheduler lock must create the lock key with an
        expiry longer than the polling interval (here 20s interval ->
        30s TTL), so the lock dies on its own if the scheduler is killed
        unexpectedly; removing the lock must delete the key.
        """
        key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        locker = Scheduler(connection=self.testconn, interval=20)
        self.assertTrue(locker.acquire_lock())
        self.assertIn(key, tl(self.testconn.keys('*')))
        self.assertEqual(30, self.testconn.ttl(key))
        locker.remove_lock()
        self.assertNotIn(key, tl(self.testconn.keys('*')))

    def test_no_two_schedulers_acquire_lock(self):
        """
        Only one scheduler may hold the lock at a time, and only the
        holder can actually release it.
        """
        key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        first = Scheduler(connection=self.testconn, interval=20)
        second = Scheduler(connection=self.testconn, interval=20)
        # Only the first scheduler wins the lock.
        self.assertTrue(first.acquire_lock())
        self.assertFalse(second.acquire_lock())
        self.assertIn(key, tl(self.testconn.keys('*')))
        # A non-holder's remove_lock() must leave the key alone.
        second.remove_lock()
        self.assertIn(key, tl(self.testconn.keys('*')))
        # The holder's remove_lock() deletes it.
        first.remove_lock()
        self.assertNotIn(key, tl(self.testconn.keys('*')))

    def test_create_job(self):
        """
        Jobs created by the scheduler must be persisted to Redis and
        resolvable back to the scheduled function.
        """
        created = self.scheduler._create_job(say_hello, args=(), kwargs={})
        fetched = Job.fetch(created.id, connection=self.testconn)
        self.assertEqual(created, fetched)
        self.assertEqual(say_hello, fetched.func)

    def test_create_job_with_ttl(self):
        """
        A ttl given at creation time must be stored on the persisted job.
        """
        created = self.scheduler._create_job(say_hello, ttl=2, args=(), kwargs={})
        fetched = Job.fetch(created.id, connection=self.testconn)
        self.assertEqual(2, fetched.ttl)

    def test_create_job_with_id(self):
        """
        An explicit id must be stored on the persisted job.
        """
        created = self.scheduler._create_job(say_hello, id='id test', args=(), kwargs={})
        fetched = Job.fetch(created.id, connection=self.testconn)
        self.assertEqual('id test', fetched.id)

    def test_create_job_with_description(self):
        """
        A custom description must be stored on the persisted job.
        """
        created = self.scheduler._create_job(say_hello, description='description',
                                             args=(), kwargs={})
        fetched = Job.fetch(created.id, connection=self.testconn)
        self.assertEqual('description', fetched.description)

    def test_create_job_with_timeout(self):
        """
        Ensure that timeout is passed to RQ.
        """
        timeout = 13
        # Pass the variable rather than a duplicated literal 13 (the old
        # code hard-coded 13 in the call, so changing `timeout` would have
        # silently stopped testing anything).
        job = self.scheduler._create_job(say_hello, timeout=timeout, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(timeout, job_from_queue.timeout)

    def test_job_not_persisted_if_commit_false(self):
        """
        With commit=False the job must not be written to Redis at all.
        """
        uncommitted = self.scheduler._create_job(say_hello, commit=False)
        self.assertEqual({}, self.testconn.hgetall(uncommitted.key))

    def test_create_scheduled_job(self):
        """
        Scheduled jobs must land in the scheduler's sorted set with their
        scheduled time as the score.
        """
        scheduled_time = datetime.utcnow()
        job = self.scheduler.enqueue_at(scheduled_time, say_hello)
        self.assertEqual(Job.fetch(job.id, connection=self.testconn), job)
        member_ids = tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertIn(job.id, member_ids)
        score = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)
        self.assertEqual(to_unix(scheduled_time), score)

    def test_create_job_with_meta(self):
        """
        Custom meta must round-trip to RQ unchanged.
        """
        expected = {'say': 'hello'}
        created = self.scheduler._create_job(say_hello, meta=expected)
        fetched = Job.fetch(created.id, connection=self.testconn)
        self.assertEqual(expected, fetched.meta)

    def test_enqueue_at_sets_timeout(self):
        """
        enqueue_at must honor a custom timeout.
        """
        custom_timeout = 13
        scheduled = self.scheduler.enqueue_at(datetime.utcnow(), say_hello,
                                              timeout=custom_timeout)
        fetched = Job.fetch(scheduled.id, connection=self.testconn)
        self.assertEqual(custom_timeout, fetched.timeout)

    def test_enqueue_at_sets_job_id(self):
        """
        enqueue_at must honor a custom job id.
        """
        custom_id = 'test_id'
        scheduled = self.scheduler.enqueue_at(datetime.utcnow(), say_hello,
                                              job_id=custom_id)
        self.assertEqual(custom_id, scheduled.id)

    def test_enqueue_at_sets_job_ttl(self):
        """
        enqueue_at must honor a custom job ttl.
        """
        custom_ttl = 123456789
        scheduled = self.scheduler.enqueue_at(datetime.utcnow(), say_hello,
                                              job_ttl=custom_ttl)
        self.assertEqual(custom_ttl, scheduled.ttl)

    def test_enqueue_at_sets_job_result_ttl(self):
        """
        enqueue_at must honor a custom result ttl.
        """
        custom_result_ttl = 1234567890
        scheduled = self.scheduler.enqueue_at(datetime.utcnow(), say_hello,
                                              job_result_ttl=custom_result_ttl)
        self.assertEqual(custom_result_ttl, scheduled.result_ttl)

    def test_enqueue_at_sets_meta(self):
        """
        enqueue_at must honor custom meta.
        """
        expected_meta = {'say': 'hello'}
        scheduled = self.scheduler.enqueue_at(datetime.utcnow(), say_hello,
                                              meta=expected_meta)
        self.assertEqual(expected_meta, scheduled.meta)

    def test_enqueue_in(self):
        """
        Jobs queued via enqueue_in must be scored at now + delta.
        """
        right_now = datetime.utcnow()
        delay = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(delay, say_hello)
        self.assertIn(job.id,
                      tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(to_unix(right_now + delay),
                         self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))
        # A longer delay must be reflected in the score as well.
        delay = timedelta(hours=1)
        job = self.scheduler.enqueue_in(delay, say_hello)
        self.assertEqual(to_unix(right_now + delay),
                         self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))

    def test_enqueue_in_sets_timeout(self):
        """
        enqueue_in must honor a custom timeout.
        """
        custom_timeout = 13
        scheduled = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello,
                                              timeout=custom_timeout)
        fetched = Job.fetch(scheduled.id, connection=self.testconn)
        self.assertEqual(custom_timeout, fetched.timeout)

    def test_enqueue_in_sets_job_id(self):
        """
        enqueue_in must honor a custom job id.
        """
        custom_id = 'test_id'
        scheduled = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello,
                                              job_id=custom_id)
        self.assertEqual(custom_id, scheduled.id)

    def test_enqueue_in_sets_job_ttl(self):
        """
        enqueue_in must honor a custom job ttl.
        """
        custom_ttl = 123456789
        scheduled = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello,
                                              job_ttl=custom_ttl)
        self.assertEqual(custom_ttl, scheduled.ttl)

    def test_enqueue_in_sets_job_result_ttl(self):
        """
        enqueue_in must honor a custom result ttl.
        """
        custom_result_ttl = 1234567890
        scheduled = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello,
                                              job_result_ttl=custom_result_ttl)
        self.assertEqual(custom_result_ttl, scheduled.result_ttl)

    def test_enqueue_in_sets_meta(self):
        """
        enqueue_in must honor custom meta.
        """
        expected_meta = {'say': 'hello'}
        scheduled = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello,
                                              meta=expected_meta)
        self.assertEqual(expected_meta, scheduled.meta)

    def test_count(self):
        """
        count() must honor an optional until argument given either as a
        timedelta or as a datetime.
        """
        now = datetime.utcnow()
        self.scheduler.enqueue_at(now, say_hello)
        self.assertEqual(1, self.scheduler.count())

        one_hour_later = now + timedelta(hours=1)
        just_before = now + timedelta(minutes=59, seconds=59)

        self.scheduler.enqueue_at(one_hour_later, say_hello)

        # The second job is an hour away, so both bounded counts skip it.
        self.assertEqual(1, self.scheduler.count(timedelta(minutes=59, seconds=59)))
        self.assertEqual(1, self.scheduler.count(just_before))
        self.assertEqual(2, self.scheduler.count())

    def test_get_jobs(self):
        """
        get_jobs() must return every job due up to the given bound, and
        with_times=True must pair each job with its scheduled datetime.
        """
        now = datetime.utcnow()
        due_job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(due_job, self.scheduler.get_jobs(now))
        later_job = self.scheduler.enqueue_at(now + timedelta(hours=1), say_hello)
        self.assertIn(later_job, self.scheduler.get_jobs(timedelta(hours=1, seconds=1)))
        paired = list(self.scheduler.get_jobs(with_times=True))
        self.assertIn(later_job, [pair[0] for pair in paired])
        self.assertIsInstance(paired[0][1], datetime)
        # A bound just short of one hour excludes the later job.
        self.assertNotIn(later_job, self.scheduler.get_jobs(timedelta(minutes=59, seconds=59)))

    def test_get_jobs_slice(self):
        """
        Ensure get_jobs() returns the appropriate slice of all jobs using offset and length.
        """
        now = datetime.utcnow()
        future_time = now + timedelta(hours=1)
        future_test_time = now + timedelta(minutes=59, seconds=59)

        # Schedule each job a second later than the previous job,
        # otherwise Redis will return jobs that have the same scheduled time in
        # lexicographical order (not the order in which we enqueued them)
        now_jobs = [self.scheduler.enqueue_at(now + timedelta(seconds=x), say_hello)
                    for x in range(15)]
        future_jobs = [self.scheduler.enqueue_at(future_time + timedelta(seconds=x), say_hello)
                       for x in range(15)]

        expected_slice = now_jobs[5:] + future_jobs[:10]   # last 10 from now_jobs and first 10 from future_jobs
        expected_until_slice = now_jobs[5:]                # last 10 from now_jobs

        jobs = self.scheduler.get_jobs()
        jobs_slice = self.scheduler.get_jobs(offset=5, length=20)
        # With an `until` bound just before future_time, the slice is cut
        # off before any of the future jobs.
        jobs_until_slice = self.scheduler.get_jobs(future_test_time, offset=5, length=20)

        self.assertEqual(now_jobs + future_jobs, list(jobs))
        self.assertEqual(expected_slice, list(jobs_slice))
        self.assertEqual(expected_until_slice, list(jobs_until_slice))

    def test_get_jobs_to_queue(self):
        """
        Only jobs whose scheduled time has already passed may be queued.
        """
        now = datetime.utcnow()
        due = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(due, self.scheduler.get_jobs_to_queue())
        not_due = self.scheduler.enqueue_at(now + timedelta(hours=1), say_hello)
        self.assertNotIn(not_due, self.scheduler.get_jobs_to_queue())

    def test_enqueue_job(self):
        """
        When scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        - Queue is recognized by rq's Queue.all()
        """
        now = datetime.utcnow()
        queue_name = 'foo'
        # Schedule through a scheduler bound to a non-default queue name.
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)

        job = scheduler.enqueue_at(now, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job, tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10)))
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        # The job must be routed to the 'foo' queue, whether the queue is
        # looked up via the scheduler or rebuilt from its Redis key.
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)
        self.assertIn(queue, Queue.all())

    def test_enqueue_job_with_queue(self):
        """
        A scheduler bound to a Queue object must enqueue into that
        exact queue.
        """
        bound_queue = Queue('foo', connection=self.testconn)
        scheduler = Scheduler(connection=self.testconn, queue=bound_queue)
        job = scheduler._create_job(say_hello)
        self.assertEqual(bound_queue, scheduler.get_queue_for_job(job))
        scheduler.enqueue_job(job)
        self.assertIsNotNone(job.enqueued_at)
        self.assertIn(job, bound_queue.jobs)
        self.assertIn(bound_queue, Queue.all())

    def test_job_membership(self):
        """
        ``in`` checks on the scheduler must accept both Job instances
        and plain job ids.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
        for needle in (job, job.id):
            self.assertIn(needle, self.scheduler)
        self.assertNotIn("non-existing-job-id", self.scheduler)

    def test_cancel_scheduled_job(self):
        """
        Canceling a scheduled job must remove it from the sorted set of
        scheduled jobs.
        """
        # Schedule a job one minute out, then cancel it.
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello)
        self.scheduler.cancel(job)
        # Its id must be gone from the scheduled-jobs sorted set.
        still_scheduled = tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertNotIn(job.id, still_scheduled)

    def test_change_execution_time(self):
        """
        ``change_execution_time`` must update the job's score, and must
        raise ``ValueError`` once the job has been canceled.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
        moved_to = datetime(2010, 1, 1)
        self.scheduler.change_execution_time(job, moved_to)
        score = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)
        self.assertEqual(to_unix(moved_to), score)
        # A canceled job can no longer be rescheduled.
        self.scheduler.cancel(job)
        with self.assertRaises(ValueError):
            self.scheduler.change_execution_time(job, moved_to)

    def test_args_kwargs_are_passed_correctly(self):
        """
        Positional and keyword arguments must be saved on the job for
        both ``enqueue_at`` and ``enqueue_in``.
        """
        schedulers = [
            lambda *a, **kw: self.scheduler.enqueue_at(
                datetime.utcnow(), simple_addition, *a, **kw),
            lambda *a, **kw: self.scheduler.enqueue_in(
                timedelta(minutes=1), simple_addition, *a, **kw),
        ]
        for schedule in schedulers:
            # All positional.
            job = schedule(1, 1, 1)
            self.assertEqual(job.args, (1, 1, 1))
            # All keyword, given out of order.
            job = schedule(z=1, y=1, x=1)
            self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
            # Mixed positional and keyword.
            job = schedule(1, z=1, y=1)
            self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
            self.assertEqual(job.args, (1,))

    def test_interval_and_repeat_persisted_correctly(self):
        """
        The ``interval`` and ``repeat`` attributes must survive a round
        trip through Redis via the job's meta dict.
        """
        scheduled = self.scheduler.schedule(datetime.utcnow(), say_hello,
                                            interval=10, repeat=11)
        persisted = Job.fetch(scheduled.id, connection=self.testconn)
        self.assertEqual(10, persisted.meta['interval'])
        self.assertEqual(11, persisted.meta['repeat'])

    def test_crontab_persisted_correctly(self):
        """
        Ensure that crontab attribute gets correctly saved in Redis.
        """
        # create a job that runs one minute past each whole hour
        job = self.scheduler.cron("1 * * * *", say_hello)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['cron_string'], "1 * * * *")

        # get the scheduled_time and convert it to a datetime object
        unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)
        datetime_time = from_unix(unix_time)

        # check that minute=1, seconds=0, and is within an hour
        # (the next "minute 1" occurrence is always less than one hour away)
        assert datetime_time.minute == 1
        assert datetime_time.second == 0
        assert datetime_time - datetime.utcnow() < timedelta(hours=1)

    def test_crontab_sets_timeout(self):
        """
        cron() must honor a custom timeout.
        """
        custom_timeout = 13
        cron_job = self.scheduler.cron("1 * * * *", say_hello, timeout=custom_timeout)
        fetched = Job.fetch(cron_job.id, connection=self.testconn)
        self.assertEqual(custom_timeout, fetched.timeout)

    def test_crontab_sets_id(self):
        """
        cron() must honor a custom job id.
        """
        custom_id = "hello-job-id"
        cron_job = self.scheduler.cron("1 * * * *", say_hello, id=custom_id)
        fetched = Job.fetch(cron_job.id, connection=self.testconn)
        self.assertEqual(custom_id, fetched.id)

    def test_crontab_sets_default_result_ttl(self):
        """
        Cron jobs must default to result_ttl == -1, the value used for
        periodic tasks.
        """
        cron_job = self.scheduler.cron("1 * * * *", say_hello)
        fetched = Job.fetch(cron_job.id, connection=self.testconn)
        self.assertEqual(-1, fetched.result_ttl)

    def test_crontab_sets_description(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom description
        """
        custom_description = 'test description'
        job = self.scheduler.cron(
            "1 * * * *", say_hello, description=custom_description)
        # the description must survive the round trip through Redis
        persisted = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(custom_description, persisted.description)

    def test_repeat_without_interval_raises_error(self):
        """Scheduling with `repeat` but no `interval` must raise ValueError."""
        with self.assertRaises(ValueError):
            self.scheduler.schedule(datetime.utcnow(), say_hello, repeat=11)

    def test_job_with_intervals_get_rescheduled(self):
        """
        Ensure jobs with interval attribute are put back in the scheduler
        """
        scheduled_at = datetime.utcnow()
        every = 10
        job = self.scheduler.schedule(scheduled_at, say_hello, interval=every)
        self.scheduler.enqueue_job(job)
        # after being enqueued the job is still tracked for its next run
        scheduled_ids = tl(
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertIn(job.id, scheduled_ids)
        # the next run is exactly one interval past the original time
        next_run = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)
        self.assertEqual(next_run, to_unix(scheduled_at) + every)

    def test_job_with_interval_can_set_meta(self):
        """
        Ensure that jobs with interval attribute can be created with meta
        """
        expected_meta = {'say': 'hello'}
        job = self.scheduler.schedule(
            datetime.utcnow(), say_hello, interval=10, meta=expected_meta)
        self.scheduler.enqueue_job(job)
        # meta must be preserved through enqueueing
        self.assertEqual(expected_meta, job.meta)

    def test_job_with_crontab_get_rescheduled(self):
        """
        Ensure a cron job is put back in the scheduler after being enqueued,
        and that a changed cron_string produces the new next-run time.
        """
        # Create a job with a cronjob_string
        job = self.scheduler.cron("1 * * * *", say_hello)

        # remember the currently stored next-run time (zscore = unix time)
        old_next_scheduled_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)

        # change crontab in place before re-enqueueing
        job.meta['cron_string'] = "2 * * * *"

        # enqueue the job; this should reschedule it from the new cron_string
        self.scheduler.enqueue_job(job)

        # the job must still be present in the scheduled-jobs sorted set
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # check that next scheduled time has changed
        self.assertNotEqual(old_next_scheduled_time,
                            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))

        # check that new next scheduled time is set correctly
        expected_next_scheduled_time = to_unix(get_next_scheduled_time("2 * * * *"))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         expected_next_scheduled_time)

    def test_job_with_repeat(self):
        """
        Ensure jobs with repeat attribute are put back in the scheduler
        X (repeat) number of times
        """
        time_now = datetime.utcnow()
        interval = 10
        # If job is repeated once, the job shouldn't be put back in the queue
        # (its single run is consumed by this enqueue_job call)
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=1)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=2)
        self.scheduler.enqueue_job(job)
        # first enqueue: one repeat remains, so the job stays scheduled
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.scheduler.enqueue_job(job)
        # second enqueue exhausts the repeat count; the job is dropped
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_missing_jobs_removed_from_scheduler(self):
        """
        Ensure jobs that don't exist when queued are removed from the scheduler.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello)
        # NOTE(review): after cancel() the job is still tracked by the
        # scheduler (asserted below) — presumably cancel only detaches it
        # from its queue without deleting the job hash; confirm against rq.
        job.cancel()
        list(self.scheduler.get_jobs_to_queue())
        self.assertIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))
        # delete() removes the job data entirely; sweeping the scheduler
        # again must drop the dangling id from the sorted set
        job.delete()
        list(self.scheduler.get_jobs_to_queue())
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_periodic_jobs_sets_result_ttl(self):
        """
        Ensure periodic jobs set result_ttl to infinite.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5)
        # Assert on the job re-fetched from Redis so the *persisted* value is
        # checked (previously job_from_queue was fetched but never used and
        # the assertion inspected only the in-memory object).
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.result_ttl, -1)

    def test_periodic_jobs_sets_ttl(self):
        """
        Ensure periodic jobs sets correctly ttl.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, ttl=4)
        # Assert on the job re-fetched from Redis so the *persisted* ttl is
        # checked (previously job_from_queue was fetched but never used).
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.ttl, 4)

    def test_periodic_jobs_sets_meta(self):
        """
        Ensure periodic jobs sets correctly meta.
        """
        expected_meta = {'say': 'hello'}
        scheduled = self.scheduler.schedule(
            datetime.utcnow(), say_hello, interval=5, meta=expected_meta)
        self.assertEqual(expected_meta, scheduled.meta)

    def test_periodic_job_sets_id(self):
        """
        Ensure that ID is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, id='id test')
        # Assert on the job re-fetched from Redis so the *persisted* id is
        # checked (previously job_from_queue was fetched but never used).
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('id test', job_from_queue.id)

    def test_periodic_job_sets_description(self):
        """
        Ensure that description is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, description='description')
        # Assert on the job re-fetched from Redis so the *persisted*
        # description is checked (previously job_from_queue was fetched
        # but never used).
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('description', job_from_queue.description)

    def test_run(self):
        """
        Check correct signal handling in Scheduler.run().
        """
        def interrupt_after_delay():
            # Give run() a second to install its signal handler, then
            # deliver SIGINT to our own process so that handler fires.
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)

        interrupter = Thread(target=interrupt_after_delay)
        interrupter.start()
        # the installed handler converts the signal into SystemExit
        self.assertRaises(SystemExit, self.scheduler.run)
        interrupter.join()

    def test_run_burst(self):
        """
        Check burst mode of Scheduler.run().
        """
        due_job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
        self.assertIn(due_job, self.scheduler.get_jobs_to_queue())
        self.assertEqual(1, len(list(self.scheduler.get_jobs())))
        # burst mode drains everything that is due, then returns immediately
        self.scheduler.run(burst=True)
        self.assertEqual(0, len(list(self.scheduler.get_jobs())))

    def test_scheduler_w_o_explicit_connection(self):
        """
        Ensure instantiating Scheduler w/o explicit connection works.
        """
        implicit = Scheduler()
        # it must pick up the connection the test harness made current
        self.assertEqual(self.testconn, implicit.connection)

    def test_small_float_interval(self):
        """
        Test that scheduler accepts 'interval' of type float, less than 1 second.
        """
        key = Scheduler.scheduler_key
        lock_key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=0.1)   # testing interval = 0.1 second
        self.assertEqual(scheduler._interval, 0.1)

        # acquire lock
        self.assertTrue(scheduler.acquire_lock())
        self.assertIn(lock_key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(lock_key), 10)  # int(0.1) + 10 = 10

        # enqueue a job and verify it through the *local* scheduler instance.
        # (Previously these checks went through self.scheduler, which worked
        # only because both schedulers share the same connection and key.)
        now = datetime.utcnow()
        job = scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, scheduler.get_jobs_to_queue())
        self.assertEqual(len(list(scheduler.get_jobs())), 1)

        # remove the lock so run() can re-acquire it below
        scheduler.remove_lock()

        # test that run works with the small floating-point interval
        def send_stop_signal():
            """
            Sleep for 1 second, then send a INT signal to ourself, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)
        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, scheduler.run)
        thread.join()

        # all jobs must have been scheduled during 1 second
        self.assertEqual(len(list(scheduler.get_jobs())), 0)