Пример #1
0
def start_synchronization(restart_q_name, path_q_name, file_q_name, target,
                          root, interval, job_name, hdlr):
    """Register a synchronization job, periodic or one-shot.

    Does nothing (beyond logging an error) if a job named ``job_name``
    is already known to the scheduler.
    """
    root_abs = realpath(root)

    conn = StrictRedis()
    scheduler = Scheduler(connection=conn)

    # Refuse to double-register the same job name.
    if job_name in scheduler:
        logger.error("job exists")
        return

    # Both branches hand the same positional arguments to `restart`.
    restart_args = [path_q_name, file_q_name, target, root_abs, root_abs, hdlr]

    if interval is None:
        # One-shot: enqueue a single restart job right away.
        restart_q = Queue(restart_q_name, connection=conn)
        restart_q.enqueue(restart, *restart_args, job_id=job_name)
    else:
        # Periodic: rq-scheduler re-runs `restart` every `interval` seconds.
        scheduler.schedule(
            scheduled_time=datetime.utcnow(),
            func=restart,
            args=restart_args,
            interval=interval,
            queue_name=restart_q_name,
            id=job_name)
Пример #2
0
def schedule():
    """Creates scheduler object."""
    build_scheduler = Scheduler(connection=WorkerQueues.connection)
    log.info('scheduler created')

    cleanup_interval = int(os.getenv('RENKU_SVC_CLEANUP_INTERVAL', 60))
    log.info('cleanup interval set to {}'.format(cleanup_interval))

    # Both cleanup jobs share identical timing parameters; register them
    # in one pass instead of spelling out two near-identical calls.
    cleanup_jobs = (
        (CLEANUP_QUEUE_FILES, cache_files_cleanup),
        (CLEANUP_QUEUE_PROJECTS, cache_project_cleanup),
    )
    for queue_name, cleanup_func in cleanup_jobs:
        build_scheduler.schedule(
            scheduled_time=datetime.utcnow(),
            queue_name=queue_name,
            func=cleanup_func,
            interval=cleanup_interval,
            result_ttl=cleanup_interval + 1,
        )

    log_level = os.getenv('RQ_WORKER_LOG_LEVEL', 'INFO')
    setup_loghandlers(log_level)
    log.info('log level set to {}'.format(log_level))

    yield build_scheduler
Пример #3
0
def add_scheduled_task(request):
    """Schedule a repeating RQ task described by the request's GET params.

    GET parameters:
        task       -- name of a callable in the ``tasks`` module
        period     -- repeat interval in seconds
        queue      -- queue name the scheduler should target
        parameters -- optional single integer argument for the task
    """
    task = request.GET.get('task')
    period = request.GET.get('period')
    queue = request.GET.get('queue')
    parameters = request.GET.get('parameters')

    from rq import use_connection
    from rq_scheduler import Scheduler
    from datetime import datetime

    use_connection()  # Use RQ's default Redis connection
    scheduler = Scheduler(queue)

    # Build the optional argument list once so both cases share one call.
    task_args = [int(parameters)] if parameters else []

    scheduler.schedule(
        scheduled_time=datetime.now(),  # Time for first execution
        func=getattr(tasks, task),      # Function to be queued
        args=task_args,
        # BUG FIX: GET parameters are strings; rq-scheduler expects an
        # integer number of seconds for `interval`.
        interval=int(period),
        repeat=None,  # None means repeat forever
    )
    return HttpResponse('Success')
Пример #4
0
def add_scheduled_task(request):
    """Schedule a repeating RQ task from the request's GET parameters.

    GET parameters: ``task`` (name in the ``tasks`` module), ``period``
    (seconds between runs), ``queue`` (target queue name) and an optional
    integer ``parameters`` argument for the task.
    """
    task = request.GET.get('task')
    period = request.GET.get('period')
    queue = request.GET.get('queue')
    parameters = request.GET.get('parameters')

    from rq import use_connection
    from rq_scheduler import Scheduler
    from datetime import datetime

    use_connection()  # Use RQ's default Redis connection
    scheduler = Scheduler(queue)  # Scheduler bound to the requested queue

    if parameters:
        scheduler.schedule(
            scheduled_time=datetime.now(),   # Time for first execution
            func=getattr(tasks, task),       # Function to be queued
            args=[int(parameters)],
            # BUG FIX: request.GET values are strings; rq-scheduler needs
            # an integer number of seconds here.
            interval=int(period),
            repeat=None                      # None means repeat forever
        )
    else:
        scheduler.schedule(
            scheduled_time=datetime.now(),   # Time for first execution
            func=getattr(tasks, task),       # Function to be queued
            interval=int(period),            # BUG FIX: coerce string to int
            repeat=None                      # None means repeat forever
        )
    return HttpResponse('Success')
Пример #5
0
def add_scheduled_task(request):
    # Schedule a repeating RQ task described by the request's GET parameters.
    from rq_scheduler import Scheduler
    from datetime import datetime

    task = request.GET.get('task')
    period = request.GET.get('period')
    queue = request.GET.get('queue')
    parameters = request.GET.get('parameters')
    scheduler = Scheduler(queue_name=queue, connection=tasks.redis_conn)

    # Shared keyword arguments for both variants of the call.
    schedule_kwargs = dict(
        scheduled_time=datetime.utcnow(),  # time of first execution, UTC
        func=getattr(tasks, task),         # function to be queued
        interval=int(period),              # seconds between executions
        repeat=None,                       # None means repeat forever
    )
    if parameters:
        # Forward the single optional argument to the task.
        schedule_kwargs['args'] = (parameters,)

    scheduler.schedule(**schedule_kwargs)
    return HttpResponse('Success')
Пример #6
0
def start_fetching():
    """Schedule the periodic data fetch: every 60 seconds, forever."""
    from app.tasks import fetch_data
    scheduler = Scheduler(connection=get_connection())
    scheduler.schedule(
        scheduled_time=datetime.utcnow(),
        func=fetch_data,
        interval=60,  # seconds (the original's int(60) was a no-op cast)
        repeat=None,
    )
Пример #7
0
    def handle(self, *args, **options):
        """Schedule the periodic RSS fetch, unless it is already scheduled.

        With --replace, the existing fetch job is cancelled first;
        without it, a pre-existing job raises CommandError.
        """
        with Connection(redis.Redis(**settings.RQ_DATABASE)):
            scheduler = Scheduler('rss_collector')

            for job in scheduler.get_jobs():
                if job.func_name != 'collector.rss.fetch_rss':
                    continue
                if not options.get('replace'):
                    raise CommandError('RSS collector task already scheduled')
                # --replace: drop the existing job, then fall through.
                job.cancel()
                break

            try:
                scheduler.schedule(
                    datetime.datetime.now(),
                    fetch_rss,
                    interval=1200,
                    repeat=20000,
                )
            except redis.exceptions.ConnectionError:
                raise CommandError('Redis did not respond')
Пример #8
0
 def generate(self):
     """Schedule the daily lesson check; warn if Redis is unreachable."""
     scheduler = Scheduler(connection=Redis())
     try:
         scheduler.schedule(datetime.utcnow(),
                            func=check_today_lessons,
                            interval=60 * 60 * 24)  # repeat every 24h (seconds)
     except ConnectionError:
         # Best-effort: report and continue rather than crash.
         print('Redis server is not available')
Пример #9
0
def schedule_jobs():
    """Register the fire-import job: every 5 minutes, repeated 3 times."""
    connection = redis.from_url(app.config['REDIS_URL'])
    with Connection(connection) as conn:
        scheduler = Scheduler(queue=Queue())
        scheduler.schedule(
            scheduled_time=datetime.datetime.utcnow(),
            func=import_fires,
            interval=300,  # 5 * 60 seconds
            repeat=3,
        )
Пример #10
0
def job_reset_stats_queue(queuename, when, hour):
    """Schedule a periodic stats reset for *queuename* unless one exists."""
    scheduler = Scheduler(connection=Redis())
    remove_jobs_not_config()
    if exists_job_onqueue(queuename, when, hour):
        return  # already scheduled; nothing to do
    scheduler.schedule(
        scheduled_time=datetime_from_config(when, hour),
        func=reset_stats_queue,
        args=[queuename, when, hour],
        interval=seconds_from_config_interval(when),
    )
Пример #11
0
def main():
    """Parse CLI arguments and schedule an experiment's intervene/tidy job."""
    parser = argparse.ArgumentParser()

    parser.add_argument("experiment", help="The experiment")

    parser.add_argument("job",
                        choices=["intervene", "tidy"],
                        help="The job associated with the experiment")

    parser.add_argument(
        "interval",
        default=120,  # default 2 minutes
        help="Interval between tasks in seconds (default 2 minutes)")
    parser.add_argument(
        "-e",
        '--env',
        choices=['development', 'test', 'production'],
        required=False,
        help="Run within a specific environment. Otherwise run under the "
             "environment defined in the environment variable CS_ENV")

    args = parser.parse_args()

    # If the user specified the environment, override CS_ENV here.
    # (FIX: compare to None with identity, not `!= None`.)
    if args.env is not None:
        os.environ['CS_ENV'] = args.env

    scheduler = Scheduler(queue_name=os.environ['CS_ENV'], connection=Redis())

    # Keep job results for at least two days, and always at least one hour
    # longer than the scheduling interval.
    ttl = 172800  # two days in seconds
    if ttl <= int(args.interval) + 3600:
        ttl = int(args.interval) + 3600

    experiment_file = os.path.join(
        BASE_DIR, "config", "experiments", args.experiment + ".yml")
    if not os.path.isfile(experiment_file):
        print("File {0} not found. Ignoring schedule command.".format(
            experiment_file))
        sys.exit(1)

    if args.job == "intervene":
        scheduler.schedule(
            scheduled_time=datetime.utcnow(),
            func=app.controller.conduct_sticky_comment_experiment,
            args=[args.experiment],
            interval=int(args.interval),
            repeat=None,
            result_ttl=ttl)
    elif args.job == "tidy":  # argparse choices guarantee one of the two
        scheduler.schedule(scheduled_time=datetime.utcnow(),
                           func=app.controller.remove_experiment_replies,
                           args=[args.experiment],
                           interval=int(args.interval),
                           repeat=None,
                           result_ttl=ttl)
Пример #12
0
 def test_rq(self):
     """Smoke test: schedule parse_feeds five seconds from now, every 60s."""
     # queue = django_rq.get_queue('default')
     # queue.enqueue(parse_feeds)
     # BUG FIX: get_connection is a function; it must be *called* to obtain
     # a Redis connection — passing the function object itself would hand
     # the Scheduler an unusable "connection".
     redis_conn = django_rq.get_connection()
     scheduler = Scheduler(connection=redis_conn)
     scheduler.schedule(scheduled_time=datetime.datetime.utcnow() +
                        datetime.timedelta(seconds=5),
                        func=parse_feeds,
                        interval=60)  # seconds between runs
Пример #13
0
def create_schedules():
    """Wipe all scheduled jobs, then register the GitHub branch refresh."""
    from towerdashboard.jobs import refresh_github_branches
    scheduler = Scheduler(connection=Redis('redis'))

    # Clear out whatever was scheduled before this process started.
    for existing_job in scheduler.get_jobs():
        scheduler.cancel(existing_job)

    scheduler.schedule(
        scheduled_time=datetime.utcnow(),
        func=refresh_github_branches,
        interval=120,    # seconds
        repeat=None,     # forever
        result_ttl=120,
    )
Пример #14
0
def job_reset_stats_queue(queuename, when, hour):
    """Ensure a periodic stats-reset job exists for *queuename*."""
    scheduler = Scheduler(connection=Redis())
    remove_jobs_not_config()
    already_scheduled = exists_job_onqueue(queuename, when, hour)
    if not already_scheduled:
        scheduler.schedule(scheduled_time=datetime_from_config(when, hour),
                           func=reset_stats_queue,
                           args=[queuename, when, hour],
                           interval=seconds_from_config_interval(when))
Пример #15
0
    def handle(self, *args, **options):
        """Enqueue one rq_log task now, schedule one repeat, then run."""
        queue = rq.Queue('rq_log', connection=Redis())
        queue.enqueue(rq_task)

        scheduler = Scheduler(queue=queue, connection=Redis())
        scheduler.schedule(
            scheduled_time=datetime.utcnow(),  # first run: immediately
            func=rq_task,
            interval=5,  # seconds between runs
            repeat=1,    # run the scheduled copy once more
        )
        scheduler.run()  # blocks, dispatching scheduled jobs
Пример #16
0
def loop_check_is_any_new_file():
    """Schedule check_is_any_new_file to run every 10 minutes, forever."""
    with rq.Connection(redis.from_url(settings.REDIS_URL)):
        scheduler = Scheduler(queue=rq.Queue())
        scheduler.schedule(
            scheduled_time=datetime.utcnow(),  # first execution now, UTC
            func=check_is_any_new_file,
            interval=600,  # seconds between executions
        )
Пример #17
0
def register_scheduler():
    """Re-register the bulk last-seen update job from a clean slate."""
    scheduler = Scheduler('lidarts-bulk', connection=Redis())
    # Cancel any jobs left over from a previous registration.
    for stale_job in scheduler.get_jobs():
        scheduler.cancel(stale_job)
    scheduler.schedule(
        scheduled_time=datetime.utcnow(),
        # Dotted path string: the worker resolves it at execution time.
        func='lidarts.tasks.bulk_update_last_seen',
        interval=5,
        repeat=None,
        ttl=10,
    )
Пример #18
0
class Command(BaseCommand):
    # Django management command that schedules the recurring payment-release
    # check via rq-scheduler.

    def __init__(self, *args, **kwargs):
        super(Command, self).__init__(*args, **kwargs)
        # Report start time on the command's stdout stream.
        self.stdout.write('%s running payment release' % datetime.now(),
                          ending='\n')
        # NOTE(review): Redis('redis') targets a host literally named
        # "redis" (e.g. a docker-compose service) — confirm in deployment.
        self.scheduler = Scheduler(connection=Redis('redis'))

    def handle(self, *args, **options):
        """Schedule check_release to run every second, repeating forever."""
        self.scheduler.schedule(
            scheduled_time=datetime.utcnow(),
            func=check_release,
            interval=1,
            repeat=None
        )
Пример #19
0
    def partial_update(self, request, *args, **kwargs):
        """Apply the PATCH, then re-sync this script's scheduler entry.

        Any existing scheduled job for the script is cancelled; a new
        periodic job is registered whenever the (updated) interval is > 0.
        """
        print("PERFORM PARTIAL UPDATE !!!!!!!!")
        response = super(PeriodicJobViewSet,
                         self).partial_update(request, *args, **kwargs)

        script = self.get_object()
        print("------------>", script.interval)

        # scheduler add or delete

        print("scheduler !!!")

        job_id = get_job_id(script.id)
        scheduler = Scheduler("default", connection=Redis())

        print("job_id", job_id)
        job = None
        try:
            # Cancel the previously scheduled job for this script, if any.
            redis = Redis()
            job = Job.fetch(job_id, connection=redis)
            print("there is a job", job)

            scheduler.cancel(job)
            print("Cancel the job")

        except NoSuchJobError as e:
            job = None
        finally:
            # NOTE(review): this `finally` unconditionally resets `job`,
            # so the `job is None` test below is always true and the
            # reschedule depends only on script.interval > 0. Presumably
            # intentional (always re-add after cancelling) — confirm.
            job = None

        if job is None and script.interval > 0:
            print("Add for schedule ...")
            scheduler.schedule(
                scheduled_time=datetime.utcnow(
                ),  # Time for first execution, in UTC timezone
                func=func3,  # Function to be queued
                args=[{
                    "script_id": script.id
                }],  # Arguments passed into function when executed
                id=job_id,
                interval=script.
                interval,  # Time before the function is called again, in seconds
                repeat=
                None,  # Repeat this number of times (None means repeat forever)
                result_ttl=script.interval * 2,
            )
            #  do not set a result_ttl value or you set a value larger than the interval

        return response
Пример #20
0
    def addModule(self, mod):
        """Persist a module document, index it in the hub catalog, and
        schedule an hourly stats refresh for it.

        Returns the created RedisModule wrapper.
        """
        logger.info('Adding module to hub {}'.format(mod['name']))
        # Store the module object as a document
        m = RedisModule(self.dconn, self.sconn, self.autocomplete, mod['name'])
        m.save(mod)

        # Add a reference to it in the master catalog
        self.dconn.jsonset(
            self._hubkey, Path('.modules["{}"]'.format(m.get_id())), {
                'id': m.get_id(),
                'key': m.get_key(),
                'created': str(_toepoch(self._ts)),
            })

        # Schedule a job to refresh repository statistics, starting from now and every hour
        # (epoch start time means the first run is immediately due).
        s = Scheduler(connection=self.qconn)
        job = s.schedule(
            scheduled_time=datetime(1970, 1, 1),
            func=callRedisModuleUpateStats,
            args=[m.get_id()],
            interval=60 * 60,  # every hour
            repeat=None,  # indefinitely
            ttl=0,
            result_ttl=0)
        return m
Пример #21
0
def start_jobs():
    """
        Check if processs enqueue_reset_stats is working on queue if not
        enqueue function
    """
    scheduler = Scheduler(connection=Redis())
    # Is the reset-stats poller already among the scheduled jobs?
    already_scheduled = any(
        'enqueue_reset_stats' in job.func_name
        for job in scheduler.get_jobs())

    if not already_scheduled:
        scheduler.schedule(scheduled_time=datetime.datetime.utcnow(),
                           func=enqueue_reset_stats,
                           interval=60)
Пример #22
0
def start_jobs():
    """
        Check if processs enqueue_reset_stats is working on queue if not
        enqueue function
    """
    scheduler = Scheduler(connection=Redis())
    for job in scheduler.get_jobs():
        if 'enqueue_reset_stats' in job.func_name:
            return  # poller already scheduled; nothing to do

    scheduler.schedule(
        scheduled_time=datetime.datetime.utcnow(),
        func=enqueue_reset_stats,
        interval=60
    )
Пример #23
0
def schedule():
    """Creates scheduler object."""
    setup_loghandlers(level=DEPLOYMENT_LOG_LEVEL)

    build_scheduler = Scheduler(connection=WorkerQueues.connection)

    scheduler_log.info("scheduler created")

    # Cleanup cadence in seconds, overridable via environment.
    cleanup_interval = int(os.getenv("RENKU_SVC_CLEANUP_INTERVAL", 60))
    scheduler_log.info("cleanup interval set to {}".format(cleanup_interval))

    def requeue(*args, **kwargs):
        """Inverval check for scheduled jobs."""
        job = args[0]

        # Delegate to the unpatched implementation, then log the routing.
        queue = Scheduler.get_queue_for_job(build_scheduler, job)
        scheduler_log.info(
            f"job {job.id}:{job.func_name} re/queued to {queue.name}")

        return queue

    # NOTE: Patch scheduler to have requeing information on INFO log level.
    build_scheduler.get_queue_for_job = requeue

    build_scheduler.schedule(
        scheduled_time=datetime.utcnow(),
        queue_name=CLEANUP_QUEUE_FILES,
        func=cache_files_cleanup,
        interval=cleanup_interval,
        result_ttl=cleanup_interval + 1,
    )

    build_scheduler.schedule(
        scheduled_time=datetime.utcnow(),
        queue_name=CLEANUP_QUEUE_PROJECTS,
        func=cache_project_cleanup,
        interval=cleanup_interval,
        result_ttl=cleanup_interval + 1,
    )

    scheduler_log.info(f"log level set to {DEPLOYMENT_LOG_LEVEL}")
    yield build_scheduler
Пример #24
0
def start_data_manager():
    """Connect to Redis and schedule the recurring refresh_data job.

    Exits the process with status 1 if the scheduler cannot be created.
    """
    print("Starting BG Workers")
    print("Intitializing Redis Queue")
    # Prefer an explicit REDIS_URL; fall back to localhost defaults.
    if os.getenv('REDIS_URL'):
        redis_conn = Redis.from_url(os.getenv('REDIS_URL'), decode_responses=True)
    else:
        redis_conn = Redis()
    print("Connection Made")

    try:
        scheduler = Scheduler(connection=redis_conn)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt. Narrow to Exception.
        print("No Redis connection possible, exiting...")
        sys.exit(1)

    scheduler.schedule(
        scheduled_time=datetime.utcnow(),      # Time for first execution, in UTC timezone
        func=refresh_data,                     # Function to be queued
        interval=30,                           # Time before the function is called again, in seconds
    )
Пример #25
0
def setup_scheduler(func, repeat_every=60):
    """Replace any previously registered job with a fresh periodic one.

    The id of the active job is persisted in Redis under KEY so the next
    call can cancel it before re-registering.
    """
    conn = get_connection()
    previous_job_id = conn.get(KEY)

    scheduler = Scheduler(connection=conn)
    if previous_job_id:
        # Drop the stale job before registering its replacement.
        logger.info(f'Canceling old job {previous_job_id}')
        scheduler.cancel(previous_job_id)

    job = scheduler.schedule(
        scheduled_time=datetime.utcnow(),  # first execution now, UTC
        func=func,                         # function to be queued
        interval=repeat_every,             # seconds between executions
        repeat=None,                       # None means repeat forever
    )
    logger.info("Scheduled function %s to be executed every %s seconds" %
                (func.__name__, repeat_every))
    conn.set(KEY, job.id)
Пример #26
0
from proxstar.vm import VM
from proxstar.user import User
from proxstar.tasks import (
    generate_pool_cache_task,
    process_expiring_vms_task,
    cleanup_vnc_task,
    delete_vm_task,
    create_vm_task,
    setup_template_task,
)

# Register the pool-cache refresher unless it is already scheduled.
if 'generate_pool_cache' not in scheduler:
    logging.info('adding generate pool cache task to scheduler')
    scheduler.schedule(
        id='generate_pool_cache',
        scheduled_time=datetime.datetime.utcnow(),
        func=generate_pool_cache_task,
        interval=90,  # seconds
    )

# Expire VMs once a day at 05:00 via a cron-style schedule.
if 'process_expiring_vms' not in scheduler:
    logging.info('adding process expiring VMs task to scheduler')
    scheduler.cron('0 5 * * *',
                   id='process_expiring_vms',
                   func=process_expiring_vms_task)

if 'cleanup_vnc' not in scheduler:
    logging.info('adding cleanup VNC task to scheduler')
    scheduler.schedule(
        id='cleanup_vnc',
        scheduled_time=datetime.datetime.utcnow(),
        func=cleanup_vnc_task,
Пример #27
0
from redis import Redis
from rq_scheduler import Scheduler
from datetime import datetime
from tasks import save_exchange_rates

redis_conn = Redis()
scheduler = Scheduler(connection=redis_conn)

# Refresh exchange rates every 10 seconds, starting now, repeating forever.
scheduler.schedule(
    datetime.utcnow(),
    save_exchange_rates,
    interval=10,
    repeat=None,
)
Пример #28
0
class TestScheduler(RQTestCase):

    def setUp(self):
        """Create a fresh Scheduler bound to the per-test Redis connection."""
        super(TestScheduler, self).setUp()
        self.scheduler = Scheduler(connection=self.testconn)

    def test_birth_and_death_registration(self):
        """
        When scheduler registers it's birth, besides creating a key, it should
        also set an expiry that's a few seconds longer than it's polling
        interval so it automatically expires if scheduler is unexpectedly
        terminated.
        """
        key = Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=20)
        scheduler.register_birth()
        self.assertIn(key, tl(self.testconn.keys('*')))
        # TTL is the polling interval (20) plus a 10-second grace period.
        self.assertEqual(self.testconn.ttl(key), 30)
        self.assertFalse(self.testconn.hexists(key, 'death'))
        # Registering birth twice is an error.
        self.assertRaises(ValueError, scheduler.register_birth)
        scheduler.register_death()
        self.assertTrue(self.testconn.hexists(key, 'death'))

    def test_create_job(self):
        """
        Ensure that jobs are created properly.
        """
        job = self.scheduler._create_job(say_hello, args=(), kwargs={})
        # The created job must round-trip through Redis unchanged.
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job, job_from_queue)
        self.assertEqual(job_from_queue.func, say_hello)

    def test_create_job_with_ttl(self):
        """
        Ensure that TTL is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, ttl=2, args=(), kwargs={})
        # Re-fetch from Redis to prove the attribute was persisted.
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(2, job_from_queue.ttl)

    def test_create_job_with_id(self):
        """
        Ensure that ID is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, id='id test', args=(), kwargs={})
        # Re-fetch from Redis to prove the custom id was persisted.
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('id test', job_from_queue.id)

    def test_create_job_with_description(self):
        """
        Ensure that description is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, description='description', args=(), kwargs={})
        # Re-fetch from Redis to prove the description was persisted.
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('description', job_from_queue.description)

    def test_job_not_persisted_if_commit_false(self):
        """
        Ensure jobs are only saved to Redis if commit=True.
        """
        job = self.scheduler._create_job(say_hello, commit=False)
        # Nothing should exist under the job's Redis key.
        self.assertEqual(self.testconn.hgetall(job.key), {})

    def test_create_scheduled_job(self):
        """
        Ensure that scheduled jobs are put in the scheduler queue with the right score
        """
        scheduled_time = datetime.utcnow()
        job = self.scheduler.enqueue_at(scheduled_time, say_hello)
        self.assertEqual(job, Job.fetch(job.id, connection=self.testconn))
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        # The sorted-set score encodes the scheduled time as a unix timestamp.
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(scheduled_time))

    def test_enqueue_in(self):
        """
        Ensure that jobs have the right scheduled time.
        """
        right_now = datetime.utcnow()
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertIn(job.id,
                      tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        # Score must equal now + delta for both a minute and an hour offset.
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))
        time_delta = timedelta(hours=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))

    def test_get_jobs(self):
        """
        Ensure get_jobs() returns all jobs until the specified time.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs(now))
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        # get_jobs accepts either a timedelta cutoff or with_times=True
        # (which yields (job, datetime) pairs).
        self.assertIn(job, self.scheduler.get_jobs(timedelta(hours=1, seconds=1)))
        self.assertIn(job, [j[0] for j in self.scheduler.get_jobs(with_times=True)])
        self.assertIsInstance(self.scheduler.get_jobs(with_times=True)[0][1], datetime)
        self.assertNotIn(job, self.scheduler.get_jobs(timedelta(minutes=59, seconds=59)))

    def test_get_jobs_to_queue(self):
        """
        Ensure that jobs scheduled the future are not queued.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        # A job one hour out must not be considered due yet.
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertNotIn(job, self.scheduler.get_jobs_to_queue())

    def test_enqueue_job(self):
        """
        When scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        - Queue is recognized by rq's Queue.all()
        """
        now = datetime.utcnow()
        queue_name = 'foo'
        # Use a scheduler bound to a non-default queue name.
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)

        job = scheduler.enqueue_at(now, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job, tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10)))
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        # The queue must also be discoverable through rq's own registry.
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)
        self.assertIn(queue, Queue.all())

    def test_job_membership(self):
        """
        Ensure `in` works on the scheduler for both Job objects and job ids.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler)
        self.assertIn(job.id, self.scheduler)
        self.assertNotIn("non-existing-job-id", self.scheduler)

    def test_cancel_scheduled_job(self):
        """
        When scheduled job is canceled, make sure:
        - Job is removed from the sorted set of scheduled jobs
        """
        # schedule a job to be enqueued one minute from now
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        # cancel the scheduled job and check that it's gone from the set
        self.scheduler.cancel(job)
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_change_execution_time(self):
        """
        Ensure ``change_execution_time`` is called, ensure that job's score is updated
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
        new_date = datetime(2010, 1, 1)
        self.scheduler.change_execution_time(job, new_date)
        self.assertEqual(to_unix(new_date),
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))
        # Changing the time of a job that is no longer scheduled must fail.
        self.scheduler.cancel(job)
        self.assertRaises(ValueError, self.scheduler.change_execution_time, job, new_date)

    def test_args_kwargs_are_passed_correctly(self):
        """
        Ensure that arguments and keyword arguments are properly saved to jobs.
        """
        # enqueue_at: positional, keyword, and mixed argument forms.
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

        # Same three forms via enqueue_in.
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_in(time_delta, simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

    def test_enqueue_is_deprecated(self):
        """
        Ensure .enqueue() throws a DeprecationWarning
        """
        with warnings.catch_warnings(record=True) as w:
            # Enable all warnings
            warnings.simplefilter("always")
            # Return value unused; only the emitted warning matters here.
            job = self.scheduler.enqueue(datetime.utcnow(), say_hello)
            self.assertEqual(1, len(w))
            self.assertEqual(w[0].category, DeprecationWarning)

    def test_enqueue_periodic(self):
        """
        Ensure .enqueue_periodic() throws a DeprecationWarning
        """
        with warnings.catch_warnings(record=True) as w:
            # Enable all warnings
            warnings.simplefilter("always")
            # Return value unused; only the emitted warning matters here.
            job = self.scheduler.enqueue_periodic(datetime.utcnow(), 1, None, say_hello)
            self.assertEqual(1, len(w))
            self.assertEqual(w[0].category, DeprecationWarning)

    def test_interval_and_repeat_persisted_correctly(self):
        """
        Ensure that interval and repeat attributes get correctly saved in Redis.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=10, repeat=11)
        # interval/repeat live in the job's meta dict, persisted with the job.
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['interval'], 10)
        self.assertEqual(job_from_queue.meta['repeat'], 11)

    def test_crontab_persisted_correctly(self):
        """
        Ensure that crontab attribute gets correctly saved in Redis.
        """
        # create a job that runs one minute past each whole hour
        job = self.scheduler.cron("1 * * * *", say_hello)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['cron_string'], "1 * * * *")

        # get the scheduled_time and convert it to a datetime object
        unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)
        datetime_time = from_unix(unix_time)

        # check that minute=1, seconds=0, and is within an hour
        assert datetime_time.minute == 1
        assert datetime_time.second == 0
        assert datetime_time - datetime.utcnow() < timedelta(hours=1)

    def test_crontab_sets_timeout(self):
        """
        A custom timeout given to .cron() must be stored on the persisted job.
        """
        expected_timeout = 13
        job = self.scheduler.cron("1 * * * *", say_hello, timeout=expected_timeout)
        fetched = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(expected_timeout, fetched.timeout)

    def test_crontab_sets_id(self):
        """
        A custom id given to .cron() must become the persisted job's id.
        """
        custom_id = "hello-job-id"
        job = self.scheduler.cron("1 * * * *", say_hello, id=custom_id)
        fetched = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(fetched.id, custom_id)

    def test_crontab_sets_default_result_ttl(self):
        """
        Cron jobs are periodic, so their default result_ttl must be -1
        (results kept forever).
        """
        job = self.scheduler.cron("1 * * * *", say_hello)
        fetched = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(fetched.result_ttl, -1)

    def test_crontab_sets_description(self):
        """
        A custom description given to .cron() must be stored on the
        persisted job.
        """
        expected = 'test description'
        job = self.scheduler.cron("1 * * * *", say_hello, description=expected)
        fetched = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(fetched.description, expected)

    def test_repeat_without_interval_raises_error(self):
        """repeat only makes sense with interval; expect ValueError without it."""
        with self.assertRaises(ValueError):
            self.scheduler.schedule(datetime.utcnow(), say_hello, repeat=11)

    def test_job_with_intervals_get_rescheduled(self):
        """
        A job carrying an interval must be put back into the scheduled set
        with its score advanced by exactly that interval — via both
        schedule() and enqueue_periodic().
        """
        start = datetime.utcnow()
        every = 10
        key = self.scheduler.scheduled_jobs_key

        job = self.scheduler.schedule(start, say_hello, interval=every)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id, tl(self.testconn.zrange(key, 0, 1)))
        self.assertEqual(to_unix(start) + every,
                         self.testconn.zscore(key, job.id))

        # the enqueue_periodic spelling must behave identically
        job = self.scheduler.enqueue_periodic(start, every, None, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id, tl(self.testconn.zrange(key, 0, 1)))
        self.assertEqual(to_unix(start) + every,
                         self.testconn.zscore(key, job.id))

    def test_job_with_crontab_get_rescheduled(self):
        """
        When a cron job's cron_string changes, re-enqueueing must reschedule
        it at the time implied by the *new* crontab.
        """
        job = self.scheduler.cron("1 * * * *", say_hello)
        key = self.scheduler.scheduled_jobs_key

        # remember the score computed from the original crontab
        original_score = self.testconn.zscore(key, job.id)

        # rewrite the crontab, then push the job through the scheduler again
        job.meta['cron_string'] = "2 * * * *"
        self.scheduler.enqueue_job(job)

        self.assertIn(job.id, tl(self.testconn.zrange(key, 0, 1)))

        new_score = self.testconn.zscore(key, job.id)
        # the score must have moved ...
        self.assertNotEqual(original_score, new_score)
        # ... and must land exactly where the new crontab says
        self.assertEqual(new_score, to_unix(get_next_scheduled_time("2 * * * *")))

    def test_job_with_repeat(self):
        """
        A job with repeat=N must be put back into the scheduled set exactly
        N-1 times — via both schedule() and enqueue_periodic().
        """
        start = datetime.utcnow()
        every = 10
        key = self.scheduler.scheduled_jobs_key

        def scheduled_ids():
            # first couple of members of the scheduled-jobs sorted set
            return tl(self.testconn.zrange(key, 0, 1))

        # repeat=1: job runs once and is never rescheduled
        job = self.scheduler.schedule(start, say_hello, interval=every, repeat=1)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id, scheduled_ids())

        # repeat=2: rescheduled exactly once, gone after the second run
        job = self.scheduler.schedule(start, say_hello, interval=every, repeat=2)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id, scheduled_ids())
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id, scheduled_ids())

        # the enqueue_periodic spelling must behave identically
        start = datetime.utcnow()
        job = self.scheduler.enqueue_periodic(start, every, 1, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id, scheduled_ids())

        job = self.scheduler.enqueue_periodic(start, every, 2, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id, scheduled_ids())
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id, scheduled_ids())

    def test_missing_jobs_removed_from_scheduler(self):
        """
        A scheduled job whose data has been deleted must be dropped from the
        scheduled set when jobs are collected for queueing.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello)
        job.cancel()  # deletes the job, leaving a dangling zset entry
        self.scheduler.get_jobs_to_queue()
        remaining = tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertNotIn(job.id, remaining)

    def test_periodic_jobs_sets_result_ttl(self):
        """
        Ensure periodic jobs set result_ttl to infinite (-1).

        Asserts against the job re-fetched from Redis so that the *persisted*
        value is checked, not just the in-memory object. (Previously the
        fetched job was unused and the assertion ran on the local object.)
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.result_ttl, -1)

    def test_periodic_jobs_sets_ttl(self):
        """
        Ensure periodic jobs set ttl correctly.

        Asserts against the job re-fetched from Redis so that the *persisted*
        value is checked. (Previously the fetched job was unused and the
        assertion ran on the local object.)
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, ttl=4)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.ttl, 4)

    def test_periodic_job_sets_id(self):
        """
        Ensure that a custom id passed to schedule() is persisted.

        Asserts against the job re-fetched from Redis so that the *persisted*
        id is checked. (Previously the fetched job was unused and the
        assertion ran on the local object.)
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, id='id test')
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('id test', job_from_queue.id)

    def test_periodic_job_sets_description(self):
        """
        Ensure that a custom description passed to schedule() is persisted.

        Asserts against the job re-fetched from Redis so that the *persisted*
        description is checked. (Previously the fetched job was unused and
        the assertion ran on the local object.)
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, description='description')
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('description', job_from_queue.description)

    def test_run(self):
        """
        Scheduler.run() must install a signal handler that raises SystemExit
        when the process receives SIGINT.
        """
        def interrupt_later():
            # Give run() a second to install its handler, then SIGINT ourselves.
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)

        killer = Thread(target=interrupt_later)
        killer.start()
        self.assertRaises(SystemExit, self.scheduler.run)
        killer.join()

    def test_scheduler_w_o_explicit_connection(self):
        """
        A Scheduler built without a connection argument must fall back to
        the current default connection.
        """
        implicit = Scheduler()
        self.assertEqual(self.testconn, implicit.connection)

    def test_small_float_interval(self):
        """
        Test that scheduler accepts 'interval' of type float, less than 1 second.

        Walks through the full lifecycle with a 0.1s polling interval:
        registration (birth), enqueueing a job, deregistration (death), and a
        complete run() cycle terminated by SIGINT.
        """
        key = Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=0.1)   # testing interval = 0.1 second
        self.assertEqual(scheduler._interval, 0.1)

        # register birth: the scheduler key appears with a TTL derived from
        # the (truncated) interval plus a 10-second grace period
        scheduler.register_birth()
        self.assertIn(key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(key), 10)  # int(0.1) + 10 = 10
        self.assertFalse(self.testconn.hexists(key, 'death'))

        # enqueue a job due right now; it must be visible as ready-to-queue
        now = datetime.utcnow()
        job = scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        self.assertEqual(len(self.scheduler.get_jobs()), 1)

        # register death
        scheduler.register_death()

        # test that run works with the small floating-point interval
        def send_stop_signal():
            """
            Sleep for 1 second, then send a INT signal to ourself, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)
        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, scheduler.run)
        thread.join()

        # all jobs must have been scheduled during 1 second
        self.assertEqual(len(scheduler.get_jobs()), 0)
Пример #29
0
class TestScheduler(RQTestCase):
    def setUp(self):
        """Create a fresh Scheduler bound to the per-test Redis connection."""
        super(TestScheduler, self).setUp()
        self.scheduler = Scheduler(connection=self.testconn)

    def test_acquire_lock(self):
        """
        Acquiring the scheduler lock must create the lock key with an expiry
        a few seconds beyond the polling interval, so the lock self-destructs
        if the scheduler dies unexpectedly; releasing must delete the key.
        """
        lock_key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(lock_key, tl(self.testconn.keys('*')))
        candidate = Scheduler(connection=self.testconn, interval=20)
        self.assertTrue(candidate.acquire_lock())
        self.assertIn(lock_key, tl(self.testconn.keys('*')))
        # expiry = polling interval (20s) + 10s grace
        self.assertEqual(30, self.testconn.ttl(lock_key))
        candidate.remove_lock()
        self.assertNotIn(lock_key, tl(self.testconn.keys('*')))

    def test_no_two_schedulers_acquire_lock(self):
        """
        The lock is exclusive: while one scheduler holds it, a second
        scheduler can neither acquire nor release it.
        """
        lock_key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(lock_key, tl(self.testconn.keys('*')))
        holder = Scheduler(connection=self.testconn, interval=20)
        contender = Scheduler(connection=self.testconn, interval=20)
        self.assertTrue(holder.acquire_lock())
        self.assertFalse(contender.acquire_lock())
        self.assertIn(lock_key, tl(self.testconn.keys('*')))
        # a non-owner's remove_lock() must be a no-op
        contender.remove_lock()
        self.assertIn(lock_key, tl(self.testconn.keys('*')))
        holder.remove_lock()
        self.assertNotIn(lock_key, tl(self.testconn.keys('*')))

    def test_create_job(self):
        """
        _create_job must persist a job that round-trips through Redis intact.
        """
        created = self.scheduler._create_job(say_hello, args=(), kwargs={})
        fetched = Job.fetch(created.id, connection=self.testconn)
        self.assertEqual(fetched, created)
        self.assertEqual(say_hello, fetched.func)

    def test_create_job_with_ttl(self):
        """
        The ttl keyword must be forwarded to the underlying RQ job.
        """
        created = self.scheduler._create_job(say_hello, ttl=2, args=(), kwargs={})
        fetched = Job.fetch(created.id, connection=self.testconn)
        self.assertEqual(fetched.ttl, 2)

    def test_create_job_with_id(self):
        """
        The id keyword must be forwarded to the underlying RQ job.
        """
        created = self.scheduler._create_job(say_hello, id='id test',
                                             args=(), kwargs={})
        fetched = Job.fetch(created.id, connection=self.testconn)
        self.assertEqual(fetched.id, 'id test')

    def test_create_job_with_description(self):
        """
        The description keyword must be forwarded to the underlying RQ job.
        """
        created = self.scheduler._create_job(say_hello, description='description',
                                             args=(), kwargs={})
        fetched = Job.fetch(created.id, connection=self.testconn)
        self.assertEqual(fetched.description, 'description')

    def test_create_job_with_timeout(self):
        """
        Ensure that timeout is passed to RQ.
        """
        timeout = 13
        # Pass the variable rather than a duplicated literal so the argument
        # and the assertion can never drift apart.
        job = self.scheduler._create_job(say_hello,
                                         timeout=timeout,
                                         args=(),
                                         kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(timeout, job_from_queue.timeout)

    def test_job_not_persisted_if_commit_false(self):
        """
        With commit=False the job hash must never be written to Redis.
        """
        uncommitted = self.scheduler._create_job(say_hello, commit=False)
        self.assertEqual({}, self.testconn.hgetall(uncommitted.key))

    def test_create_scheduled_job(self):
        """
        enqueue_at must persist the job and file it in the scheduled-jobs
        sorted set, scored by its scheduled time.
        """
        when = datetime.utcnow()
        job = self.scheduler.enqueue_at(when, say_hello)
        self.assertEqual(Job.fetch(job.id, connection=self.testconn), job)
        members = tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertIn(job.id, members)
        score = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)
        self.assertEqual(to_unix(when), score)

    def test_create_job_with_meta(self):
        """
        The meta mapping must be forwarded to the underlying RQ job.
        """
        meta = {'say': 'hello'}
        created = self.scheduler._create_job(say_hello, meta=meta)
        fetched = Job.fetch(created.id, connection=self.testconn)
        self.assertEqual(fetched.meta, meta)

    def test_enqueue_at_sets_timeout(self):
        """
        A custom timeout given to enqueue_at must be stored on the job.
        """
        custom_timeout = 13
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello,
                                        timeout=custom_timeout)
        fetched = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(custom_timeout, fetched.timeout)

    def test_enqueue_at_sets_job_id(self):
        """
        A custom job_id given to enqueue_at must become the job's id.
        """
        wanted_id = 'test_id'
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello,
                                        job_id=wanted_id)
        self.assertEqual(wanted_id, job.id)

    def test_enqueue_at_sets_job_ttl(self):
        """
        A custom job_ttl given to enqueue_at must become the job's ttl.
        """
        wanted_ttl = 123456789
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello,
                                        job_ttl=wanted_ttl)
        self.assertEqual(wanted_ttl, job.ttl)

    def test_enqueue_at_sets_job_result_ttl(self):
        """
        A custom job_result_ttl given to enqueue_at must become the job's
        result_ttl.
        """
        wanted_result_ttl = 1234567890
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello,
                                        job_result_ttl=wanted_result_ttl)
        self.assertEqual(wanted_result_ttl, job.result_ttl)

    def test_enqueue_at_sets_meta(self):
        """
        A custom meta mapping given to enqueue_at must be stored on the job.
        """
        wanted_meta = {'say': 'hello'}
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello,
                                        meta=wanted_meta)
        self.assertEqual(wanted_meta, job.meta)

    def test_enqueue_in(self):
        """
        enqueue_in must schedule the job at now + delta for any delta.
        """
        base = datetime.utcnow()
        key = self.scheduler.scheduled_jobs_key

        delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(delta, say_hello)
        self.assertIn(job.id, tl(self.testconn.zrange(key, 0, 1)))
        self.assertEqual(to_unix(base + delta),
                         self.testconn.zscore(key, job.id))

        delta = timedelta(hours=1)
        job = self.scheduler.enqueue_in(delta, say_hello)
        self.assertEqual(to_unix(base + delta),
                         self.testconn.zscore(key, job.id))

    def test_enqueue_in_sets_timeout(self):
        """
        A custom timeout given to enqueue_in must be stored on the job.
        """
        custom_timeout = 13
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello,
                                        timeout=custom_timeout)
        fetched = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(custom_timeout, fetched.timeout)

    def test_enqueue_in_sets_job_id(self):
        """
        A custom job_id given to enqueue_in must become the job's id.
        """
        wanted_id = 'test_id'
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello,
                                        job_id=wanted_id)
        self.assertEqual(wanted_id, job.id)

    def test_enqueue_in_sets_job_ttl(self):
        """
        A custom job_ttl given to enqueue_in must become the job's ttl.
        """
        wanted_ttl = 123456789
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello,
                                        job_ttl=wanted_ttl)
        self.assertEqual(wanted_ttl, job.ttl)

    def test_enqueue_in_sets_job_result_ttl(self):
        """
        A custom job_result_ttl given to enqueue_in must become the job's
        result_ttl.
        """
        wanted_result_ttl = 1234567890
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello,
                                        job_result_ttl=wanted_result_ttl)
        self.assertEqual(wanted_result_ttl, job.result_ttl)

    def test_enqueue_in_sets_meta(self):
        """
        A custom meta mapping given to enqueue_in must be stored on the job.
        """
        wanted_meta = {'say': 'hello'}
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello,
                                        meta=wanted_meta)
        self.assertEqual(wanted_meta, job.meta)

    def test_count(self):
        """
        count() must honor an optional cutoff given either as a timedelta
        or as an absolute datetime.
        """
        base = datetime.utcnow()
        self.scheduler.enqueue_at(base, say_hello)
        self.assertEqual(1, self.scheduler.count())

        one_hour_out = base + timedelta(hours=1)
        just_before = base + timedelta(minutes=59, seconds=59)

        self.scheduler.enqueue_at(one_hour_out, say_hello)

        # the job an hour out is excluded by both cutoff spellings
        self.assertEqual(1, self.scheduler.count(timedelta(minutes=59, seconds=59)))
        self.assertEqual(1, self.scheduler.count(just_before))
        self.assertEqual(2, self.scheduler.count())

    def test_get_jobs(self):
        """
        get_jobs() must return every job scheduled up to the given cutoff,
        and can pair each job with its scheduled datetime.
        """
        base = datetime.utcnow()
        near_job = self.scheduler.enqueue_at(base, say_hello)
        self.assertIn(near_job, self.scheduler.get_jobs(base))

        far_job = self.scheduler.enqueue_at(base + timedelta(hours=1), say_hello)
        self.assertIn(far_job,
                      self.scheduler.get_jobs(timedelta(hours=1, seconds=1)))
        with_times = list(self.scheduler.get_jobs(with_times=True))
        self.assertIn(far_job, [pair[0] for pair in with_times])
        self.assertIsInstance(with_times[0][1], datetime)
        self.assertNotIn(far_job,
                         self.scheduler.get_jobs(timedelta(minutes=59, seconds=59)))

    def test_get_jobs_slice(self):
        """
        get_jobs() must honor offset/length, both with and without an
        until-cutoff.
        """
        base = datetime.utcnow()
        an_hour_out = base + timedelta(hours=1)
        cutoff = base + timedelta(minutes=59, seconds=59)

        # Stagger jobs one second apart; jobs sharing a score would come
        # back from Redis in lexicographical rather than insertion order.
        near_jobs = [self.scheduler.enqueue_at(base + timedelta(seconds=i),
                                               say_hello)
                     for i in range(15)]
        far_jobs = [self.scheduler.enqueue_at(an_hour_out + timedelta(seconds=i),
                                              say_hello)
                    for i in range(15)]

        # offset=5, length=20 -> last 10 near jobs plus first 10 far jobs
        expected_slice = near_jobs[5:] + far_jobs[:10]
        # the same slice limited to the cutoff -> only the near jobs remain
        expected_until_slice = near_jobs[5:]

        all_jobs = self.scheduler.get_jobs()
        sliced = self.scheduler.get_jobs(offset=5, length=20)
        sliced_until = self.scheduler.get_jobs(cutoff, offset=5, length=20)

        self.assertEqual(near_jobs + far_jobs, list(all_jobs))
        self.assertEqual(expected_slice, list(sliced))
        self.assertEqual(expected_until_slice, list(sliced_until))

    def test_get_jobs_to_queue(self):
        """
        get_jobs_to_queue() must only return jobs whose time has come.
        """
        base = datetime.utcnow()
        due = self.scheduler.enqueue_at(base, say_hello)
        self.assertIn(due, self.scheduler.get_jobs_to_queue())
        not_yet = self.scheduler.enqueue_at(base + timedelta(hours=1), say_hello)
        self.assertNotIn(not_yet, self.scheduler.get_jobs_to_queue())

    def test_enqueue_job(self):
        """
        Enqueueing a scheduled job must remove it from the scheduled set,
        stamp enqueued_at, and land it on a queue that Queue.all() can see.
        """
        queue_name = 'foo'
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)

        job = scheduler.enqueue_at(datetime.utcnow(), say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(
            job, tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10)))
        reloaded = Job.fetch(job.id, connection=self.testconn)
        self.assertIsNotNone(reloaded.enqueued_at)
        target_queue = scheduler.get_queue_for_job(reloaded)
        self.assertIn(reloaded, target_queue.jobs)
        by_key = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(reloaded, by_key.jobs)
        self.assertIn(by_key, Queue.all())

    def test_enqueue_job_with_scheduler_queue(self):
        """
        A scheduler bound to a Queue object must enqueue onto that queue
        when the job itself names no queue.
        """
        bound_queue = Queue('foo', connection=self.testconn)
        scheduler = Scheduler(connection=self.testconn, queue=bound_queue)
        job = scheduler._create_job(say_hello)
        self.assertEqual(scheduler.get_queue_for_job(job), bound_queue)
        scheduler.enqueue_job(job)
        self.assertIsNotNone(job.enqueued_at)
        self.assertIn(job, bound_queue.jobs)
        self.assertIn(bound_queue, Queue.all())

    def test_enqueue_job_with_job_queue_name(self):
        """
        A queue_name set at job creation must win over the scheduler's own
        bound queue.
        """
        bound_queue = Queue('foo', connection=self.testconn)
        named_queue = Queue('job_foo', connection=self.testconn)
        scheduler = Scheduler(connection=self.testconn, queue=bound_queue)
        job = scheduler._create_job(say_hello, queue_name='job_foo')
        self.assertEqual(named_queue, scheduler.get_queue_for_job(job))
        scheduler.enqueue_job(job)
        self.assertIsNotNone(job.enqueued_at)
        self.assertIn(job, named_queue.jobs)
        self.assertIn(named_queue, Queue.all())

    def test_enqueue_at_with_job_queue_name(self):
        """
        A queue_name passed to enqueue_at must win over the scheduler's own
        bound queue.
        """
        bound_queue = Queue('foo', connection=self.testconn)
        named_queue = Queue('job_foo', connection=self.testconn)
        scheduler = Scheduler(connection=self.testconn, queue=bound_queue)
        job = scheduler.enqueue_at(datetime.utcnow(), say_hello,
                                   queue_name='job_foo')
        self.assertEqual(named_queue, scheduler.get_queue_for_job(job))
        self.scheduler.enqueue_job(job)
        self.assertIsNotNone(job.enqueued_at)
        self.assertIn(job, named_queue.jobs)
        self.assertIn(named_queue, Queue.all())

    def test_job_membership(self):
        """
        `in` on the scheduler must accept both Job objects and raw job ids.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
        self.assertIn(job, self.scheduler)
        self.assertIn(job.id, self.scheduler)
        self.assertNotIn("non-existing-job-id", self.scheduler)

    def test_cancel_scheduled_job(self):
        """
        Cancelling a scheduled job must remove it from the scheduled set.
        """
        # schedule a job one minute out, then cancel it straight away
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello)
        self.scheduler.cancel(job)
        remaining = tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertNotIn(job.id, remaining)

    def test_change_execution_time(self):
        """
        change_execution_time must rescore the job; calling it on a job no
        longer scheduled must raise ValueError.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
        moved_to = datetime(2010, 1, 1)
        self.scheduler.change_execution_time(job, moved_to)
        score = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)
        self.assertEqual(score, to_unix(moved_to))
        self.scheduler.cancel(job)
        with self.assertRaises(ValueError):
            self.scheduler.change_execution_time(job, moved_to)

    def test_args_kwargs_are_passed_correctly(self):
        """
        Positional and keyword arguments must be saved on jobs created by
        both enqueue_at and enqueue_in, in every mixing of the two.
        """
        # purely positional
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition,
                                        1, 1, 1)
        self.assertEqual((1, 1, 1), job.args)
        # purely keyword
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition,
                                        z=1, y=1, x=1)
        self.assertEqual({'x': 1, 'y': 1, 'z': 1}, job.kwargs)
        # mixed
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition,
                                        1, z=1, y=1)
        self.assertEqual({'y': 1, 'z': 1}, job.kwargs)
        self.assertEqual((1, ), job.args)

        # and the same three shapes via enqueue_in
        delay = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(delay, simple_addition, 1, 1, 1)
        self.assertEqual((1, 1, 1), job.args)
        job = self.scheduler.enqueue_in(delay, simple_addition, z=1, y=1, x=1)
        self.assertEqual({'x': 1, 'y': 1, 'z': 1}, job.kwargs)
        job = self.scheduler.enqueue_in(delay, simple_addition, 1, z=1, y=1)
        self.assertEqual({'y': 1, 'z': 1}, job.kwargs)
        self.assertEqual((1, ), job.args)

    def test_interval_and_repeat_persisted_correctly(self):
        """
        interval/repeat passed to schedule() must survive a Redis round-trip
        in the job's meta.
        """
        scheduled = self.scheduler.schedule(datetime.utcnow(),
                                            say_hello,
                                            interval=10,
                                            repeat=11)
        persisted = Job.fetch(scheduled.id, connection=self.testconn)
        self.assertEqual(10, persisted.meta['interval'])
        self.assertEqual(11, persisted.meta['repeat'])

    def test_crontab_persisted_correctly(self):
        """
        The cron string given to .cron() must be stored in job meta, and the
        scheduled-set score must match the crontab: minute 1 of some hour,
        at most an hour away.
        """
        # a job that fires one minute past every whole hour
        job = self.scheduler.cron("1 * * * *", say_hello)
        persisted = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual("1 * * * *", persisted.meta['cron_string'])

        # convert the stored score back into a datetime
        score = self.testconn.zscore(self.scheduler.scheduled_jobs_key,
                                     job.id)
        when = from_unix(score)

        # minute=1, second=0, and no more than an hour in the future
        assert when.minute == 1
        assert when.second == 0
        assert when - datetime.utcnow() < timedelta(hours=1)

    def test_crontab_persisted_correctly_with_local_timezone(self):
        """
        Ensure that crontab attribute gets correctly saved in Redis when using local TZ.
        """
        # create a job that runs at 15:00 every day, interpreted in local time
        job = self.scheduler.cron("0 15 * * *",
                                  say_hello,
                                  use_local_timezone=True)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['cron_string'], "0 15 * * *")

        # get the scheduled_time and convert it to a datetime object
        unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key,
                                         job.id)
        datetime_time = from_unix(unix_time)

        # NOTE(review): despite the "local_tz" name, the expected wall time is
        # built with get_utc_timezone() — presumably the test environment pins
        # the local timezone to UTC; confirm against the helper's definition.
        expected_datetime_in_local_tz = datetime.now(
            get_utc_timezone()).replace(hour=15,
                                        minute=0,
                                        second=0,
                                        microsecond=0)
        assert datetime_time.time(
        ) == expected_datetime_in_local_tz.astimezone(
            get_utc_timezone()).time()

    def test_crontab_rescheduled_correctly_with_local_timezone(self):
        """
        Ensure re-enqueueing a local-TZ cron job recomputes its schedule
        from the updated cron string.
        """
        # Create a job with a cronjob_string (daily at 15:01 local time)
        job = self.scheduler.cron("1 15 * * *",
                                  say_hello,
                                  use_local_timezone=True)

        # change crontab (shift by one minute to 15:02)
        job.meta['cron_string'] = "2 15 * * *"

        # reenqueue the job; this should re-score it from the new cron string
        self.scheduler.enqueue_job(job)

        # get the scheduled_time and convert it to a datetime object
        unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key,
                                         job.id)
        datetime_time = from_unix(unix_time)

        # Compare only the time-of-day, normalized to UTC on both sides.
        expected_datetime_in_local_tz = datetime.now(
            get_utc_timezone()).replace(hour=15,
                                        minute=2,
                                        second=0,
                                        microsecond=0)
        assert datetime_time.time(
        ) == expected_datetime_in_local_tz.astimezone(
            get_utc_timezone()).time()

    def test_crontab_sets_timeout(self):
        """A cron-scheduled job honours a caller-supplied timeout."""
        custom_timeout = 13
        job = self.scheduler.cron("1 * * * *", say_hello,
                                  timeout=custom_timeout)
        fetched = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(custom_timeout, fetched.timeout)

    def test_crontab_sets_id(self):
        """A cron-scheduled job keeps a caller-supplied id."""
        custom_id = "hello-job-id"
        job = self.scheduler.cron("1 * * * *", say_hello, id=custom_id)
        fetched = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(fetched.id, custom_id)

    def test_crontab_sets_default_result_ttl(self):
        """Cron jobs default to result_ttl=-1 (results kept forever)."""
        job = self.scheduler.cron("1 * * * *", say_hello)
        fetched = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(fetched.result_ttl, -1)

    def test_crontab_sets_description(self):
        """A cron-scheduled job keeps a caller-supplied description."""
        expected = 'test description'
        job = self.scheduler.cron("1 * * * *", say_hello,
                                  description=expected)
        fetched = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(fetched.description, expected)

    def test_repeat_without_interval_raises_error(self):
        """Passing repeat without interval must raise ValueError."""
        with self.assertRaises(ValueError):
            self.scheduler.schedule(datetime.utcnow(), say_hello, repeat=11)

    def test_job_with_intervals_get_rescheduled(self):
        """Interval jobs are re-added to the schedule after being enqueued."""
        start = datetime.utcnow()
        every = 10
        job = self.scheduler.schedule(start, say_hello, interval=every)
        self.scheduler.enqueue_job(job)
        scheduled_ids = tl(
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertIn(job.id, scheduled_ids)
        # The next run is pushed out by exactly one interval.
        new_score = self.testconn.zscore(self.scheduler.scheduled_jobs_key,
                                         job.id)
        self.assertEqual(to_unix(start) + every, new_score)

    def test_job_with_interval_can_set_meta(self):
        """Interval jobs accept and keep a custom meta dict."""
        expected_meta = {'say': 'hello'}
        job = self.scheduler.schedule(datetime.utcnow(),
                                      say_hello,
                                      interval=10,
                                      meta=expected_meta)
        self.scheduler.enqueue_job(job)
        self.assertEqual(expected_meta, job.meta)

    def test_job_with_crontab_get_rescheduled(self):
        """
        Ensure that changing a job's cron_string and re-enqueueing it
        recomputes the next scheduled time from the new string.
        """
        # Create a job with a cronjob_string
        job = self.scheduler.cron("1 * * * *", say_hello)

        # current unix_time (zset score) before the change
        old_next_scheduled_time = self.testconn.zscore(
            self.scheduler.scheduled_jobs_key, job.id)

        # change crontab
        job.meta['cron_string'] = "2 * * * *"

        # enqueue the job; rescheduling should read the updated meta
        self.scheduler.enqueue_job(job)

        self.assertIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # check that next scheduled time has changed
        self.assertNotEqual(
            old_next_scheduled_time,
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))

        # check that new next scheduled time is set correctly
        expected_next_scheduled_time = to_unix(
            get_next_scheduled_time("2 * * * *"))
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            expected_next_scheduled_time)

    def test_job_with_repeat(self):
        """
        Ensure jobs with repeat attribute are put back in the scheduler
        X (repeat) number of times
        """
        time_now = datetime.utcnow()
        interval = 10
        # If job is repeated once, the job shouldn't be put back in the queue
        # (the single allowed run is consumed by this enqueue).
        job = self.scheduler.schedule(time_now,
                                      say_hello,
                                      interval=interval,
                                      repeat=1)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once:
        # first enqueue leaves one remaining run, second enqueue exhausts it.
        job = self.scheduler.schedule(time_now,
                                      say_hello,
                                      interval=interval,
                                      repeat=2)
        self.scheduler.enqueue_job(job)
        self.assertIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_missing_jobs_removed_from_scheduler(self):
        """
        Ensure jobs that don't exist when queued are removed from the scheduler.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello)
        # After cancel() the job is evidently still considered to exist
        # (its id remains in the scheduled-jobs zset after a sweep) —
        # presumably cancel() leaves the job hash in Redis; TODO confirm
        # against rq's Job.cancel semantics.
        job.cancel()
        list(self.scheduler.get_jobs_to_queue())
        self.assertIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        # delete() removes the job data itself, so the next sweep drops
        # the dangling id from the scheduled-jobs sorted set.
        job.delete()
        list(self.scheduler.get_jobs_to_queue())
        self.assertNotIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_periodic_jobs_sets_result_ttl(self):
        """
        Ensure periodic jobs set result_ttl to infinite.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        # Assert on the fetched copy (not the local object) so the test
        # actually proves the value round-trips through Redis.
        self.assertEqual(job_from_queue.result_ttl, -1)

    def test_periodic_jobs_sets_ttl(self):
        """
        Ensure periodic jobs sets correctly ttl.
        """
        job = self.scheduler.schedule(datetime.utcnow(),
                                      say_hello,
                                      interval=5,
                                      ttl=4)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        # Assert on the fetched copy (not the local object) so the test
        # actually proves the ttl round-trips through Redis.
        self.assertEqual(job_from_queue.ttl, 4)

    def test_periodic_jobs_sets_meta(self):
        """Periodic jobs store the meta dict passed at schedule time."""
        expected_meta = {'say': 'hello'}
        job = self.scheduler.schedule(datetime.utcnow(),
                                      say_hello,
                                      interval=5,
                                      meta=expected_meta)
        self.assertEqual(job.meta, expected_meta)

    def test_periodic_job_sets_id(self):
        """
        Ensure that ID is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(),
                                      say_hello,
                                      interval=5,
                                      id='id test')
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        # Assert on the fetched copy: checking job.id alone is trivially
        # true since we supplied the id ourselves.
        self.assertEqual('id test', job_from_queue.id)

    def test_periodic_job_sets_description(self):
        """
        Ensure that description is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(),
                                      say_hello,
                                      interval=5,
                                      description='description')
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        # Assert on the fetched copy so persistence through Redis is
        # actually exercised.
        self.assertEqual('description', job_from_queue.description)

    def test_run(self):
        """
        Check correct signal handling in Scheduler.run().
        """
        def send_stop_signal():
            """
            Sleep for 1 second, then send a INT signal to ourself, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)

        thread = Thread(target=send_stop_signal)
        thread.start()
        # run() should convert SIGINT into a clean SystemExit rather than
        # letting KeyboardInterrupt propagate out of the loop.
        self.assertRaises(SystemExit, self.scheduler.run)
        thread.join()

    def test_run_burst(self):
        """Burst mode drains all due jobs and returns immediately."""
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        self.assertEqual(1, len(list(self.scheduler.get_jobs())))
        # burst=True processes everything currently due, then exits.
        self.scheduler.run(burst=True)
        self.assertEqual(0, len(list(self.scheduler.get_jobs())))

    def test_scheduler_w_o_explicit_connection(self):
        """Without an explicit connection, Scheduler uses the default one."""
        implicit = Scheduler()
        self.assertEqual(self.testconn, implicit.connection)

    def test_small_float_interval(self):
        """
        Test that scheduler accepts 'interval' of type float, less than 1 second.
        """
        key = Scheduler.scheduler_key
        lock_key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn,
                              interval=0.1)  # testing interval = 0.1 second
        self.assertEqual(scheduler._interval, 0.1)

        # acquire lock; its TTL is int(interval) + 10 seconds grace
        self.assertTrue(scheduler.acquire_lock())
        self.assertIn(lock_key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(lock_key), 10)  # int(0.1) + 10 = 10

        # enqueue a job that is due immediately
        now = datetime.utcnow()
        job = scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        self.assertEqual(len(list(self.scheduler.get_jobs())), 1)

        # remove the lock so run() below can acquire it itself
        scheduler.remove_lock()

        # test that run works with the small floating-point interval
        def send_stop_signal():
            """
            Sleep for 1 second, then send a INT signal to ourself, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)

        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, scheduler.run)
        thread.join()

        # all jobs must have been scheduled during 1 second
        # (with a 0.1s polling interval the loop ran roughly 10 times)
        self.assertEqual(len(list(scheduler.get_jobs())), 0)

    def test_get_queue_for_job_with_job_queue_name(self):
        """A job's own queue_name takes precedence over the scheduler's queue."""
        scheduler_queue = Queue('scheduler_foo', connection=self.testconn)
        expected_queue = Queue('job_foo', connection=self.testconn)
        bound = Scheduler(connection=self.testconn, queue=scheduler_queue)
        job = bound._create_job(say_hello, queue_name='job_foo')
        self.assertEqual(expected_queue, bound.get_queue_for_job(job))

    def test_get_queue_for_job_without_job_queue_name(self):
        """Without a per-job queue_name, the scheduler's own queue is used."""
        bound_queue = Queue('scheduler_foo', connection=self.testconn)
        bound = Scheduler(connection=self.testconn, queue=bound_queue)
        job = bound._create_job(say_hello)
        self.assertEqual(bound_queue, bound.get_queue_for_job(job))
# Example #30
def main():
    """Register a recurring reddit-fetch task with rq-scheduler.

    The queue name comes from the CS_ENV environment variable, optionally
    overridden by --env. Exactly one task is scheduled per invocation,
    chosen from the (sub, pagetype) arguments.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument("sub",
                        help="The subreddit to query (or all for the frontpage)")

    parser.add_argument("pagetype",
                        choices=["new", "top", "contr", "hot", "comments", "modactions"],
                        help="For front pages, what page to query")
    parser.add_argument("interval",
                        default = 120, # default 2 minutes
                        help="Interval between tasks in seconds (default 2 minutes)")
    parser.add_argument("-e", '--env',
                        choices=['development', 'test', 'production', 'CivilServantBot2'],
                        required = False,
                        help="Run within a specific environment. Otherwise run under the environment defined in the environment variable CS_ENV")
    parser.add_argument("-p", "--profile",
                        required = False,
                        action = 'store_true',
                        help="Run the performance profiler and save the results in the logs/profiles directory")

    args = parser.parse_args()

    # if the user specified the environment, it overrides CS_ENV
    if args.env is not None:
        os.environ['CS_ENV'] = args.env

    queue_name = os.environ['CS_ENV']
    scheduler = Scheduler(queue_name=queue_name, connection=Redis())

    page_type = args.pagetype.lower()

    # Allow at least two days per task, or the polling interval plus an
    # hour, whichever is larger, before rq times the job out.
    timeout_seconds = 172800  ## two days in seconds
    if timeout_seconds <= int(args.interval) + 3600:
        timeout_seconds = int(args.interval) + 3600
    ttl = timeout_seconds + 180

    # Select the controller callable and its arguments; the schedule()
    # call itself is identical for every case.
    if args.sub == "all":
        func = app.controller.fetch_reddit_front
        func_args = [getattr(PageType, args.pagetype.upper())]
    elif page_type == "comments":
        func = app.controller.fetch_last_thousand_comments
        func_args = [args.sub]
    elif page_type == "modactions":
        func = app.controller.fetch_mod_action_history
        func_args = [args.sub]
    else:
        func = app.controller.fetch_subreddit_front
        func_args = [args.sub, getattr(PageType, args.pagetype.upper())]

    scheduler.schedule(
            scheduled_time=datetime.utcnow(),
            func=func,
            args=func_args,
            kwargs={'_profile': args.profile},
            interval=int(args.interval),
            repeat=None,
            timeout = timeout_seconds,
            result_ttl = ttl)
# Example #31
import os

import redis
from rq import Connection, Queue, Worker, use_connection
from rq_scheduler import Scheduler
from datetime import datetime

from hello import workerstuff

# Queues this worker listens on, in priority order.
listen = ['high', 'default', 'low']

# Fall back to a local Redis instance when REDISCLOUD_URL is not set.
redis_url = os.getenv('REDISCLOUD_URL', 'redis://localhost:6379')
print(redis_url)
conn = redis.from_url(redis_url)

# Pass the connection explicitly: calling use_connection() with no
# argument would silently ignore REDISCLOUD_URL and use a local Redis.
use_connection(conn)

# Run workerstuff every 30 seconds, starting now.
scheduler = Scheduler()
scheduler.schedule(datetime.now(), workerstuff, interval=30)

if __name__ == '__main__':
    with Connection(conn):
        worker = Worker(map(Queue, listen))
        worker.work()
# Example #32
class TestScheduler(RQTestCase):

    def setUp(self):
        # Bind a fresh Scheduler to the per-test Redis connection that
        # RQTestCase.setUp provides.
        super(TestScheduler, self).setUp()
        self.scheduler = Scheduler(connection=self.testconn)

    def test_acquire_lock(self):
        """Acquiring the lock creates a key with a TTL of interval + 10s,
        so a crashed scheduler's lock expires on its own."""
        lock_key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(lock_key, tl(self.testconn.keys('*')))
        polling_scheduler = Scheduler(connection=self.testconn, interval=20)
        self.assertTrue(polling_scheduler.acquire_lock())
        self.assertIn(lock_key, tl(self.testconn.keys('*')))
        # TTL = polling interval (20) + 10 second grace period.
        self.assertEqual(30, self.testconn.ttl(lock_key))
        polling_scheduler.remove_lock()
        self.assertNotIn(lock_key, tl(self.testconn.keys('*')))

    def test_no_two_schedulers_acquire_lock(self):
        """Only one scheduler may hold the lock at a time, and only the
        original holder can actually release it."""
        lock_key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(lock_key, tl(self.testconn.keys('*')))
        first = Scheduler(connection=self.testconn, interval=20)
        second = Scheduler(connection=self.testconn, interval=20)
        self.assertTrue(first.acquire_lock())
        self.assertFalse(second.acquire_lock())
        self.assertIn(lock_key, tl(self.testconn.keys('*')))
        # A scheduler that never acquired the lock must not remove it.
        second.remove_lock()
        self.assertIn(lock_key, tl(self.testconn.keys('*')))
        first.remove_lock()
        self.assertNotIn(lock_key, tl(self.testconn.keys('*')))

    def test_create_job(self):
        """_create_job persists a fetchable job wrapping the right callable."""
        created = self.scheduler._create_job(say_hello, args=(), kwargs={})
        fetched = Job.fetch(created.id, connection=self.testconn)
        self.assertEqual(created, fetched)
        self.assertEqual(say_hello, fetched.func)

    def test_create_job_with_ttl(self):
        """_create_job forwards ttl to the underlying RQ job."""
        created = self.scheduler._create_job(say_hello, ttl=2, args=(), kwargs={})
        fetched = Job.fetch(created.id, connection=self.testconn)
        self.assertEqual(fetched.ttl, 2)

    def test_create_job_with_id(self):
        """_create_job forwards a custom id to the underlying RQ job."""
        created = self.scheduler._create_job(say_hello, id='id test', args=(), kwargs={})
        fetched = Job.fetch(created.id, connection=self.testconn)
        self.assertEqual(fetched.id, 'id test')

    def test_create_job_with_description(self):
        """_create_job forwards a custom description to the underlying RQ job."""
        created = self.scheduler._create_job(say_hello, description='description', args=(), kwargs={})
        fetched = Job.fetch(created.id, connection=self.testconn)
        self.assertEqual(fetched.description, 'description')

    def test_create_job_with_timeout(self):
        """
        Ensure that timeout is passed to RQ.
        """
        timeout = 13
        # Pass the variable rather than repeating the literal, so the call
        # and the assertion below cannot drift apart.
        job = self.scheduler._create_job(say_hello, timeout=timeout, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(timeout, job_from_queue.timeout)

    def test_job_not_persisted_if_commit_false(self):
        """With commit=False the job hash must not be written to Redis."""
        uncommitted = self.scheduler._create_job(say_hello, commit=False)
        self.assertEqual({}, self.testconn.hgetall(uncommitted.key))

    def test_create_scheduled_job(self):
        """enqueue_at stores the job and scores it with its unix run time."""
        when = datetime.utcnow()
        job = self.scheduler.enqueue_at(when, say_hello)
        self.assertEqual(Job.fetch(job.id, connection=self.testconn), job)
        members = tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertIn(job.id, members)
        score = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)
        self.assertEqual(to_unix(when), score)

    def test_create_job_with_meta(self):
        """_create_job forwards the meta dict to the underlying RQ job."""
        meta = {'say': 'hello'}
        created = self.scheduler._create_job(say_hello, meta=meta)
        fetched = Job.fetch(created.id, connection=self.testconn)
        self.assertEqual(fetched.meta, meta)

    def test_enqueue_at_sets_timeout(self):
        """enqueue_at forwards a custom timeout to the job."""
        custom_timeout = 13
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello,
                                        timeout=custom_timeout)
        fetched = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(custom_timeout, fetched.timeout)

    def test_enqueue_at_sets_job_id(self):
        """enqueue_at forwards a custom job_id to the job."""
        custom_id = 'test_id'
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello,
                                        job_id=custom_id)
        self.assertEqual(custom_id, job.id)

    def test_enqueue_at_sets_job_ttl(self):
        """enqueue_at forwards a custom job ttl to the job."""
        custom_ttl = 123456789
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello,
                                        job_ttl=custom_ttl)
        self.assertEqual(custom_ttl, job.ttl)

    def test_enqueue_at_sets_job_result_ttl(self):
        """enqueue_at forwards a custom result ttl to the job."""
        custom_result_ttl = 1234567890
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello,
                                        job_result_ttl=custom_result_ttl)
        self.assertEqual(custom_result_ttl, job.result_ttl)

    def test_enqueue_at_sets_meta(self):
        """enqueue_at forwards a custom meta dict to the job."""
        expected_meta = {'say': 'hello'}
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello,
                                        meta=expected_meta)
        self.assertEqual(expected_meta, job.meta)

    def test_enqueue_in(self):
        """enqueue_in schedules the job at now + delta."""
        base = datetime.utcnow()
        delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(delta, say_hello)
        self.assertIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(
            to_unix(base + delta),
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))
        # A larger delta lands proportionally further in the future.
        delta = timedelta(hours=1)
        job = self.scheduler.enqueue_in(delta, say_hello)
        self.assertEqual(
            to_unix(base + delta),
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))

    def test_enqueue_in_sets_timeout(self):
        """enqueue_in forwards a custom timeout to the job."""
        custom_timeout = 13
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello,
                                        timeout=custom_timeout)
        fetched = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(custom_timeout, fetched.timeout)

    def test_enqueue_in_sets_job_id(self):
        """enqueue_in forwards a custom job_id to the job."""
        custom_id = 'test_id'
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello,
                                        job_id=custom_id)
        self.assertEqual(custom_id, job.id)

    def test_enqueue_in_sets_job_ttl(self):
        """enqueue_in forwards a custom job ttl to the job."""
        custom_ttl = 123456789
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello,
                                        job_ttl=custom_ttl)
        self.assertEqual(custom_ttl, job.ttl)

    def test_enqueue_in_sets_job_result_ttl(self):
        """enqueue_in forwards a custom result ttl to the job."""
        custom_result_ttl = 1234567890
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello,
                                        job_result_ttl=custom_result_ttl)
        self.assertEqual(custom_result_ttl, job.result_ttl)

    def test_enqueue_in_sets_meta(self):
        """enqueue_in forwards a custom meta dict to the job."""
        expected_meta = {'say': 'hello'}
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello,
                                        meta=expected_meta)
        self.assertEqual(expected_meta, job.meta)

    def test_count(self):
        """count() honours an optional cut-off (timedelta or datetime)."""
        base = datetime.utcnow()
        self.scheduler.enqueue_at(base, say_hello)
        self.assertEqual(1, self.scheduler.count())

        an_hour_later = base + timedelta(hours=1)
        just_before = base + timedelta(minutes=59, seconds=59)
        self.scheduler.enqueue_at(an_hour_later, say_hello)

        # Cut-offs one second short of the second job exclude it.
        self.assertEqual(1, self.scheduler.count(timedelta(minutes=59, seconds=59)))
        self.assertEqual(1, self.scheduler.count(just_before))
        self.assertEqual(2, self.scheduler.count())

    def test_get_jobs(self):
        """get_jobs() returns only jobs due up to the given cut-off."""
        base = datetime.utcnow()
        due_now = self.scheduler.enqueue_at(base, say_hello)
        self.assertIn(due_now, self.scheduler.get_jobs(base))
        later = self.scheduler.enqueue_at(base + timedelta(hours=1), say_hello)
        self.assertIn(later, self.scheduler.get_jobs(timedelta(hours=1, seconds=1)))
        # with_times=True yields (job, scheduled datetime) pairs.
        self.assertIn(later, [pair[0] for pair in self.scheduler.get_jobs(with_times=True)])
        self.assertIsInstance(list(self.scheduler.get_jobs(with_times=True))[0][1], datetime)
        self.assertNotIn(later, self.scheduler.get_jobs(timedelta(minutes=59, seconds=59)))

    def test_get_jobs_slice(self):
        """
        Ensure get_jobs() returns the appropriate slice of all jobs using offset and length.
        """
        now = datetime.utcnow()
        future_time = now + timedelta(hours=1)
        future_test_time = now + timedelta(minutes=59, seconds=59)

        # Schedule each job a second later than the previous job,
        # otherwise Redis will return jobs that have the same scheduled time in
        # lexicographical order (not the order in which we enqueued them)
        now_jobs = [self.scheduler.enqueue_at(now + timedelta(seconds=x), say_hello)
                    for x in range(15)]
        future_jobs = [self.scheduler.enqueue_at(future_time + timedelta(seconds=x), say_hello)
                       for x in range(15)]

        # offset=5, length=20 over 30 jobs -> indices 5..24 of the ordering
        expected_slice = now_jobs[5:] + future_jobs[:10]   # last 10 from now_jobs and first 10 from future_jobs
        expected_until_slice = now_jobs[5:]                # last 10 from now_jobs

        jobs = self.scheduler.get_jobs()
        jobs_slice = self.scheduler.get_jobs(offset=5, length=20)
        # the until cut-off excludes all future_jobs, so only 10 remain
        jobs_until_slice = self.scheduler.get_jobs(future_test_time, offset=5, length=20)

        self.assertEqual(now_jobs + future_jobs, list(jobs))
        self.assertEqual(expected_slice, list(jobs_slice))
        self.assertEqual(expected_until_slice, list(jobs_until_slice))

    def test_get_jobs_to_queue(self):
        """Jobs scheduled in the future are not yet due for queueing."""
        base = datetime.utcnow()
        due = self.scheduler.enqueue_at(base, say_hello)
        self.assertIn(due, self.scheduler.get_jobs_to_queue())
        not_due = self.scheduler.enqueue_at(base + timedelta(hours=1), say_hello)
        self.assertNotIn(not_due, self.scheduler.get_jobs_to_queue())

    def test_enqueue_job(self):
        """
        When scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        - Queue is recognized by rq's Queue.all()
        """
        now = datetime.utcnow()
        queue_name = 'foo'
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)

        job = scheduler.enqueue_at(now, say_hello)
        # NOTE(review): the job is created via `scheduler` but enqueued via
        # `self.scheduler` — the queue is evidently resolved from the job
        # itself (see get_queue_for_job below), but confirm the mix of the
        # two scheduler instances here is intentional.
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job, tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10)))
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        # The queue must also be discoverable via its Redis key.
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)
        self.assertIn(queue, Queue.all())

    def test_enqueue_job_with_queue(self):
        """A scheduler bound to a Queue object enqueues into that queue."""
        bound_queue = Queue('foo', connection=self.testconn)
        bound = Scheduler(connection=self.testconn, queue=bound_queue)
        job = bound._create_job(say_hello)
        self.assertEqual(bound_queue, bound.get_queue_for_job(job))
        bound.enqueue_job(job)
        self.assertIsNotNone(job.enqueued_at)
        self.assertIn(job, bound_queue.jobs)
        self.assertIn(bound_queue, Queue.all())

    def test_job_membership(self):
        """Both a Job instance and a raw job id work with `in scheduler`."""
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
        self.assertTrue(job in self.scheduler)
        self.assertTrue(job.id in self.scheduler)
        self.assertFalse("non-existing-job-id" in self.scheduler)

    def test_cancel_scheduled_job(self):
        """cancel() removes the job from the scheduled-jobs sorted set."""
        # Schedule a job one minute out, then cancel it.
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello)
        self.scheduler.cancel(job)
        remaining = tl(
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertNotIn(job.id, remaining)

    def test_change_execution_time(self):
        """change_execution_time rescores the job; cancelled jobs raise."""
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
        rescheduled_to = datetime(2010, 1, 1)
        self.scheduler.change_execution_time(job, rescheduled_to)
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            to_unix(rescheduled_to))
        self.scheduler.cancel(job)
        # A cancelled job is no longer in the zset, so moving it fails.
        with self.assertRaises(ValueError):
            self.scheduler.change_execution_time(job, rescheduled_to)

    def test_args_kwargs_are_passed_correctly(self):
        """
        Ensure that arguments and keyword arguments are properly saved to jobs.
        """
        # enqueue_at: positional-only, keyword-only, and mixed call styles.
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

        # enqueue_in: same three call styles must behave identically.
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_in(time_delta, simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

    def test_interval_and_repeat_persisted_correctly(self):
        """
        Ensure that interval and repeat attributes are correctly saved.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=10, repeat=11)
        # Re-fetch from Redis: both attributes must survive round-tripping in job.meta.
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['interval'], 10)
        self.assertEqual(job_from_queue.meta['repeat'], 11)

    def test_crontab_persisted_correctly(self):
        """
        Ensure that crontab attribute gets correctly saved in Redis.
        """
        # create a job that runs one minute past each whole hour
        job = self.scheduler.cron("1 * * * *", say_hello)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['cron_string'], "1 * * * *")

        # get the scheduled_time and convert it to a datetime object
        unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)
        datetime_time = from_unix(unix_time)

        # check that minute=1, seconds=0, and is within an hour
        assert datetime_time.minute == 1
        assert datetime_time.second == 0
        assert datetime_time - datetime.utcnow() < timedelta(hours=1)

    def test_crontab_sets_timeout(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom timeout.
        """
        timeout = 13
        job = self.scheduler.cron("1 * * * *", say_hello, timeout=timeout)
        # Assert on the persisted job so we know the timeout survived Redis.
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.timeout, timeout)

    def test_crontab_sets_id(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom id
        """
        job_id = "hello-job-id"
        job = self.scheduler.cron("1 * * * *", say_hello, id=job_id)
        # Fetching by job.id and comparing to job_id proves the custom id was used.
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_id, job_from_queue.id)

    def test_crontab_sets_default_result_ttl(self):
        """
        Ensure that a job scheduled via crontab gets proper default
        result_ttl (-1) periodic tasks.
        """
        job = self.scheduler.cron("1 * * * *", say_hello)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        # -1 means the result is kept forever (job repeats indefinitely).
        self.assertEqual(-1, job_from_queue.result_ttl)

    def test_crontab_sets_description(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom description
        """
        description = 'test description'
        job = self.scheduler.cron("1 * * * *", say_hello, description=description)
        # Assert on the persisted copy, not the in-memory object.
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(description, job_from_queue.description)

    def test_repeat_without_interval_raises_error(self):
        """``repeat`` without ``interval`` is meaningless and must raise ValueError."""
        # Ensure that an error is raised if repeat is specified without interval
        def create_job():
            self.scheduler.schedule(datetime.utcnow(), say_hello, repeat=11)
        self.assertRaises(ValueError, create_job)

    def test_job_with_intervals_get_rescheduled(self):
        """
        Ensure jobs with interval attribute are put back in the scheduler
        """
        time_now = datetime.utcnow()
        interval = 10
        job = self.scheduler.schedule(time_now, say_hello, interval=interval)
        self.scheduler.enqueue_job(job)
        # After being enqueued once, the job must still be tracked ...
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        # ... with its next run scheduled exactly one interval later.
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(time_now) + interval)

    def test_job_with_interval_can_set_meta(self):
        """
        Ensure that jobs with interval attribute can be created with meta
        """
        time_now = datetime.utcnow()
        interval = 10
        meta = {'say': 'hello'}
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, meta=meta)
        self.scheduler.enqueue_job(job)
        # User-supplied meta must survive even though the scheduler also
        # stores its own bookkeeping (interval, repeat) in job.meta.
        self.assertEqual(job.meta, meta)

    def test_job_with_crontab_get_rescheduled(self):
        """Cron jobs are re-scored from their (possibly updated) cron_string on enqueue."""
        # Create a job with a cronjob_string
        job = self.scheduler.cron("1 * * * *", say_hello)

        # current unix_time
        old_next_scheduled_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)

        # change crontab
        job.meta['cron_string'] = "2 * * * *"

        # enqueue the job
        self.scheduler.enqueue_job(job)

        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # check that next scheduled time has changed
        self.assertNotEqual(old_next_scheduled_time,
                            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))

        # check that new next scheduled time is set correctly
        expected_next_scheduled_time = to_unix(get_next_scheduled_time("2 * * * *"))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         expected_next_scheduled_time)

    def test_job_with_repeat(self):
        """
        Ensure jobs with repeat attribute are put back in the scheduler
        X (repeat) number of times
        """
        time_now = datetime.utcnow()
        interval = 10
        # If job is repeated once, the job shouldn't be put back in the queue
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=1)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=2)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        # Second enqueue exhausts the repeat budget: job must drop out of the set.
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_missing_jobs_removed_from_scheduler(self):
        """
        Ensure jobs that don't exist when queued are removed from the scheduler.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello)
        # cancel() removes the job from its queue but leaves the job hash in
        # Redis, so the scheduler still considers it schedulable.
        job.cancel()
        list(self.scheduler.get_jobs_to_queue())
        self.assertIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))
        # delete() removes the job hash itself; the next sweep must prune it.
        job.delete()
        list(self.scheduler.get_jobs_to_queue())
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_periodic_jobs_sets_result_ttl(self):
        """
        Ensure periodic jobs set result_ttl to infinite.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5)
        # Assert on the job re-fetched from Redis so the *persisted* value is
        # checked (the fetched object was previously unused and the assertion
        # only inspected the local, in-memory job).
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.result_ttl, -1)

    def test_periodic_jobs_sets_ttl(self):
        """
        Ensure periodic jobs sets correctly ttl.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, ttl=4)
        # Check the value that actually round-tripped through Redis (the
        # fetched object was previously unused).
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.ttl, 4)

    def test_periodic_jobs_sets_meta(self):
        """
        Ensure periodic jobs sets correctly meta.
        """
        meta = {'say': 'hello'}
        # User meta must coexist with the scheduler's own meta entries (interval).
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, meta=meta)
        self.assertEqual(meta, job.meta)

    def test_periodic_job_sets_id(self):
        """
        Ensure that ID is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, id='id test')
        # Fetching by job.id and asserting on the persisted copy proves the
        # custom id was honored (the fetched object was previously unused).
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('id test', job_from_queue.id)

    def test_periodic_job_sets_description(self):
        """
        Ensure that description is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, description='description')
        # Assert on the persisted copy rather than the in-memory object (the
        # fetched job was previously unused).
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('description', job_from_queue.description)

    def test_run(self):
        """
        Check correct signal handling in Scheduler.run().
        """
        def send_stop_signal():
            """
            Sleep for 1 second, then send a INT signal to ourself, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)
        thread = Thread(target=send_stop_signal)
        thread.start()
        # run() blocks until SIGINT arrives; its handler exits via SystemExit.
        self.assertRaises(SystemExit, self.scheduler.run)
        thread.join()

    def test_run_burst(self):
        """
        Check burst mode of Scheduler.run().
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        self.assertEqual(len(list(self.scheduler.get_jobs())), 1)
        # burst=True drains all currently-due jobs and returns instead of looping.
        self.scheduler.run(burst=True)
        self.assertEqual(len(list(self.scheduler.get_jobs())), 0)

    def test_scheduler_w_o_explicit_connection(self):
        """
        Ensure instantiating Scheduler w/o explicit connection works.
        """
        # NOTE(review): presumably resolves the connection from RQ's connection
        # stack set up by the test base class — confirm against RQTestCase.
        s = Scheduler()
        self.assertEqual(s.connection, self.testconn)

    def test_small_float_interval(self):
        """
        Test that scheduler accepts 'interval' of type float, less than 1 second.
        """
        key = Scheduler.scheduler_key
        lock_key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=0.1)   # testing interval = 0.1 second
        self.assertEqual(scheduler._interval, 0.1)

        #acquire lock
        self.assertTrue(scheduler.acquire_lock())
        self.assertIn(lock_key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(lock_key), 10)  # int(0.1) + 10 = 10

        #enqueue a job
        # NOTE: self.scheduler (from setUp) and the local scheduler share the
        # same scheduled_jobs_key, so either instance can observe the job.
        now = datetime.utcnow()
        job = scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        self.assertEqual(len(list(self.scheduler.get_jobs())), 1)

        #remove the lock
        scheduler.remove_lock()

        #test that run works with the small floating-point interval
        def send_stop_signal():
            """
            Sleep for 1 second, then send a INT signal to ourself, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)
        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, scheduler.run)
        thread.join()

        #all jobs must have been scheduled during 1 second
        self.assertEqual(len(list(scheduler.get_jobs())), 0)
Пример #33
0
class RedisWorker(object):
    """Dispatches collector jobs to RQ, immediately or on a schedule."""

    # Hard cap, in seconds, on how long a single collector job may run.
    TIMEOUT = settings.JOB_TIMEOUT
    # Frequencies accepted by schedule_periodically().
    FREQUENCES = ["minutely", "hourly", "daily", "weekly", "monthly", "yearly"]

    def __init__(self):
        # Imported lazily so importing this module does not require rq/redis.
        import rq
        import redis
        from rq_scheduler import Scheduler

        self.conn = redis.from_url(settings.REDIS_URL)
        self.queue = rq.Queue("default", connection=self.conn, default_timeout=RedisWorker.TIMEOUT)
        # First positional argument is the queue name the scheduler enqueues into.
        self.scheduler = Scheduler("high", connection=self.conn)
        rq.use_connection(self.conn)

    def run(self, collector, **kwargs):
        """Enqueue ``collector.run`` for immediate execution; return the rq job."""
        class_name = "%s.%s" % (collector.__class__.__module__, collector.__class__.__name__)
        collector_params = collector.get_params()
        # Backlog warning.  NOTE: the previous check counted *queues*
        # (len(self.queue.all())), not jobs; Queue.count is the number of
        # pending jobs in this queue, which is what the message describes.
        if self.queue.count >= 20:
            warning("More than 20 jobs in the queue")
        return self.queue.enqueue(collector.run, collector=class_name, params=collector_params, **kwargs)

    def schedule_with_interval(self, date, interval_s, collector, *arg, **kwargs):
        """Schedule ``collector.run`` every ``interval_s`` seconds starting at ``date``.

        ``date`` defaults to now when falsy.  Returns the scheduled job.
        """
        date = date or datetime.datetime.now()
        kwargs.update(
            {
                "collector": "%s.%s" % (collector.__class__.__module__, collector.__class__.__name__),
                "params": collector.get_params(),
            }
        )
        res = self.scheduler.schedule(
            scheduled_time=date,  # Time for first execution
            func=collector.run,  # Function to be queued
            args=arg,  # Arguments passed into function when executed
            kwargs=kwargs,  # Keyword arguments passed into function when executed
            interval=interval_s,  # Time before the function is called again, in seconds
            repeat=None,  # Repeat this number of times (None means repeat forever)
            timeout=RedisWorker.TIMEOUT,
        )
        return res

    def schedule_periodically(self, date, frequence, collector, *arg, **kwargs):
        """Schedule ``collector`` at ``date``, re-queued each period via a wrapper job."""
        from brokenpromises.worker import RunAndReplaceIntTheQueuePeriodically

        assert frequence in RedisWorker.FREQUENCES, "frequence %s unknown." % (frequence)
        if frequence == "minutely":
            next_date = date + datetime.timedelta(minutes=1)
        elif frequence == "hourly":
            next_date = date + datetime.timedelta(hours=1)
        elif frequence == "daily":
            next_date = date + datetime.timedelta(days=1)
        elif frequence == "weekly":
            next_date = date + datetime.timedelta(weeks=1)
        elif frequence == "monthly":
            # Advance one calendar month, clamping the day to the target
            # month's length (e.g. Jan 31 -> Feb 28/29).  The year rolls over
            # only from December: month // 12 is 1 for month == 12, else 0.
            # (The old expression (month + 1) / 12 rolled the year over one
            # month early, and is float division under Python 3, which makes
            # datetime() raise TypeError.)
            year = date.year + date.month // 12
            month = date.month % 12 + 1
            day = min(date.day, calendar.monthrange(year, month)[1])
            next_date = datetime.datetime(year, month, day, date.hour, date.minute, date.second)
        else:  # "yearly" (guaranteed by the assert above)
            year = date.year + 1
            # Clamp Feb 29 to Feb 28 on non-leap target years.
            day = min(date.day, calendar.monthrange(year, date.month)[1])
            next_date = datetime.datetime(year, date.month, day, date.hour, date.minute, date.second)
        # Schedule in a wrapper which will requeue the job after it runs.
        self.schedule(date, RunAndReplaceIntTheQueuePeriodically(next_date, frequence, collector), *arg, **kwargs)

    def schedule(self, date, collector, *arg, **kwargs):
        """Schedule ``collector.run`` once; ``date`` is a datetime or a timedelta.

        Returns the scheduled job, or None when ``date`` is neither type.
        """
        res = None
        kwargs.update(
            {
                "collector": "%s.%s" % (collector.__class__.__module__, collector.__class__.__name__),
                "params": collector.get_params(),
            }
        )
        if type(date) is datetime.timedelta:
            res = self.scheduler.enqueue_in(date, collector.run, *arg, **kwargs)

        elif type(date) is datetime.datetime:
            res = self.scheduler.enqueue_at(date, collector.run, *arg, **kwargs)
        return res
Пример #34
0
class TestScheduler(RQTestCase):
    """Integration tests for rq_scheduler.Scheduler against a live test Redis."""

    def setUp(self):
        super(TestScheduler, self).setUp()
        # Scheduler under test shares the RQTestCase Redis connection.
        self.scheduler = Scheduler(connection=self.testconn)

    def test_birth_and_death_registration(self):
        """
        When scheduler registers it's birth, besides creating a key, it should
        also set an expiry that's a few seconds longer than it's polling
        interval so it automatically expires if scheduler is unexpectedly 
        terminated.
        """
        key = Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=20)
        scheduler.register_birth()
        self.assertIn(key, tl(self.testconn.keys('*')))
        # TTL = interval (20) + 10-second grace period.
        self.assertEqual(self.testconn.ttl(key), 30)
        self.assertFalse(self.testconn.hexists(key, 'death'))
        # Double registration must be rejected.
        self.assertRaises(ValueError, scheduler.register_birth)
        scheduler.register_death()
        self.assertTrue(self.testconn.hexists(key, 'death'))

    def test_create_job(self):
        """
        Ensure that jobs are created properly.
        """
        job = self.scheduler._create_job(say_hello, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job, job_from_queue)
        self.assertEqual(job_from_queue.func, say_hello)

    def test_job_not_persisted_if_commit_false(self):
        """
        Ensure jobs are only saved to Redis if commit=True.
        """
        job = self.scheduler._create_job(say_hello, commit=False)
        # No hash in Redis means nothing was persisted.
        self.assertEqual(self.testconn.hgetall(job.key), {})

    def test_create_scheduled_job(self):
        """
        Ensure that scheduled jobs are put in the scheduler queue with the right score
        """
        scheduled_time = datetime.utcnow()
        job = self.scheduler.enqueue_at(scheduled_time, say_hello)
        self.assertEqual(job, Job.fetch(job.id, connection=self.testconn))
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        # The zset score is the scheduled time as a unix timestamp.
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(scheduled_time))

    def test_enqueue_in(self):
        """
        Ensure that jobs have the right scheduled time.
        """
        right_now = datetime.utcnow()
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertIn(job.id,
                      tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))
        time_delta = timedelta(hours=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))

    def test_get_jobs(self):
        """
        Ensure get_jobs() returns all jobs until the specified time.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs(now))
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        # get_jobs accepts either a datetime or a timedelta cutoff.
        self.assertIn(job, self.scheduler.get_jobs(timedelta(hours=1, seconds=1)))
        # with_times=True yields (job, scheduled_datetime) pairs.
        self.assertIn(job, [j[0] for j in self.scheduler.get_jobs(with_times=True)])
        self.assertIsInstance(self.scheduler.get_jobs(with_times=True)[0][1], datetime)
        self.assertNotIn(job, self.scheduler.get_jobs(timedelta(minutes=59, seconds=59)))

    def test_get_jobs_to_queue(self):
        """
        Ensure that jobs scheduled the future are not queued.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertNotIn(job, self.scheduler.get_jobs_to_queue())

    def test_enqueue_job(self):
        """
        When scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        """
        now = datetime.utcnow()
        queue_name = 'foo'
        # Scheduler bound by queue *name* (cf. binding by Queue object elsewhere).
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)

        job = scheduler.enqueue_at(now, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job, tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10)))
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)

    def test_job_membership(self):
        # ``in`` works with both Job instances and job id strings.
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler)
        self.assertIn(job.id, self.scheduler)
        self.assertNotIn("non-existing-job-id", self.scheduler)

    def test_cancel_scheduled_job(self):
        """
        When scheduled job is canceled, make sure:
        - Job is removed from the sorted set of scheduled jobs
        """
        # schedule a job to be enqueued one minute from now
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        # cancel the scheduled job and check that it's gone from the set
        self.scheduler.cancel(job)
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_change_execution_time(self):
        """
        Ensure ``change_execution_time`` is called, ensure that job's score is updated
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
        new_date = datetime(2010, 1, 1)
        self.scheduler.change_execution_time(job, new_date)
        self.assertEqual(to_unix(new_date),
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))
        self.scheduler.cancel(job)
        # Re-scheduling a canceled job must raise.
        self.assertRaises(ValueError, self.scheduler.change_execution_time, job, new_date)

    def test_args_kwargs_are_passed_correctly(self):
        """
        Ensure that arguments and keyword arguments are properly saved to jobs.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_in(time_delta, simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

    def test_enqueue_is_deprecated(self):
        """
        Ensure .enqueue() throws a DeprecationWarning
        """
        with warnings.catch_warnings(record=True) as w:
            # Enable all warnings
            warnings.simplefilter("always")
            job = self.scheduler.enqueue(datetime.utcnow(), say_hello)
            self.assertEqual(1, len(w))
            self.assertEqual(w[0].category, DeprecationWarning)

    def test_enqueue_periodic(self):
        """
        Ensure .enqueue_periodic() throws a DeprecationWarning
        """
        with warnings.catch_warnings(record=True) as w:
            # Enable all warnings
            warnings.simplefilter("always")
            job = self.scheduler.enqueue_periodic(datetime.utcnow(), 1, None, say_hello)
            self.assertEqual(1, len(w))
            self.assertEqual(w[0].category, DeprecationWarning)

    def test_interval_and_repeat_persisted_correctly(self):
        """
        Ensure that interval and repeat attributes get correctly saved in Redis.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=10, repeat=11)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['interval'], 10)
        self.assertEqual(job_from_queue.meta['repeat'], 11)

    def test_repeat_without_interval_raises_error(self):
        # Ensure that an error is raised if repeat is specified without interval
        def create_job():
            self.scheduler.schedule(datetime.utcnow(), say_hello, repeat=11)
        self.assertRaises(ValueError, create_job)

    def test_job_with_intervals_get_rescheduled(self):
        """
        Ensure jobs with interval attribute are put back in the scheduler
        """
        time_now = datetime.utcnow()
        interval = 10
        job = self.scheduler.schedule(time_now, say_hello, interval=interval)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        # Next run is exactly one interval after the original scheduled time.
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(time_now) + interval)

        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, None, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(time_now) + interval)

    def test_job_with_repeat(self):
        """
        Ensure jobs with repeat attribute are put back in the scheduler
        X (repeat) number of times
        """
        time_now = datetime.utcnow()
        interval = 10
        # If job is repeated once, the job shouldn't be put back in the queue
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=1)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=2)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        time_now = datetime.utcnow()
        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, 1, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.enqueue_periodic(time_now, interval, 2, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_missing_jobs_removed_from_scheduler(self):
        """
        Ensure jobs that don't exist when queued are removed from the scheduler.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello)
        # NOTE(review): this assumes job.cancel() leaves the job unfetchable so
        # the next sweep prunes it — differs from versions where cancel() keeps
        # the job hash and only delete() removes it; confirm against the rq
        # version pinned by this project.
        job.cancel()
        self.scheduler.get_jobs_to_queue()
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_periodic_jobs_sets_ttl(self):
        """
        Ensure periodic jobs set result_ttl to infinite.
        """
        # NOTE(review): the name says "ttl" but body/docstring check result_ttl;
        # also job_from_queue is fetched but unused — the assertion inspects the
        # in-memory job, not the persisted one.
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.result_ttl, -1)

    def test_run(self):
        """
        Check correct signal handling in Scheduler.run().
        """
        def send_stop_signal():
            """
            Sleep for 1 second, then send a INT signal to ourself, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)
        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, self.scheduler.run)
        thread.join()

    def test_scheduler_w_o_explicit_connection(self):
        """
        Ensure instantiating Scheduler w/o explicit connection works.
        """
        s = Scheduler()
        self.assertEqual(s.connection, self.testconn)

    def test_no_functions_from__main__module(self):
        """
        Ensure functions from the __main__ module are not accepted for scheduling.
        """
        def dummy():
            return 1
        # Fake __main__ module function
        dummy.__module__ = "__main__"
        self.assertRaises(ValueError, self.scheduler._create_job, dummy)
Пример #35
0
def main():
    """Parse CLI arguments and register a recurring experiment job with rq_scheduler.

    The job repeats every ``interval`` seconds on the queue named by CS_ENV
    until the scheduler entry is removed.  Exits with status 1 when the
    experiment's YAML config file does not exist.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument("experiment",
                        help="The experiment")

    parser.add_argument("job",
                         choices=["sticky_comment_intervene", "tidy", "archive_submissions", "send_newcomer_messages"],
                         help="The job associated with the experiment")

    parser.add_argument("interval",
                        default = 120, # default 2 minutes
                        help="Interval between tasks in seconds (default 2 minutes)")
    parser.add_argument("-e", '--env',
                        choices=['development', 'test', 'production'],
                        required = False,
                        help="Run within a specific environment. Otherwise run under the environment defined in the environment variable CS_ENV")
    parser.add_argument("-p", "--profile",
                        required = False,
                        action = 'store_true',
                        help="Run the performance profiler and save the results in the logs/profiles directory")

    args = parser.parse_args()

    # if the user specified the environment, set it here
    if args.env is not None:
        os.environ['CS_ENV'] = args.env

    # One queue per environment; the scheduler enqueues into it.
    queue_name = os.environ['CS_ENV']
    scheduler = Scheduler(queue_name=queue_name, connection=Redis())

    # Job timeout: two days, but never less than one interval plus an hour of
    # slack so a slow run is not killed before the next one is due.
    timeout_seconds = 172800  ## two days in seconds
    if timeout_seconds <= int(args.interval) + 3600:
        timeout_seconds = int(args.interval) + 3600
    ttl = timeout_seconds + 180

    experiment_file = os.path.join(BASE_DIR, "config", "experiments", args.experiment + ".yml")
    if not os.path.isfile(experiment_file):
        print("File {0} not found. Ignoring schedule command.".format(experiment_file))
        sys.exit(1)

    # Dispatch table replaces four identical scheduler.schedule() branches
    # that differed only in the controller function.
    job_funcs = {
        "sticky_comment_intervene": app.controller.conduct_sticky_comment_experiment,
        "tidy": app.controller.remove_experiment_replies,
        "archive_submissions": app.controller.archive_experiment_submission_metadata,
        "send_newcomer_messages": app.controller.update_newcomer_messaging_experiment,
    }
    # args.job is constrained by argparse choices, so the lookup cannot fail.
    scheduler.schedule(
            scheduled_time=datetime.utcnow(),
            func=job_funcs[args.job],
            args=[args.experiment],
            kwargs={'_profile': args.profile},
            interval=int(args.interval),
            repeat=None,
            timeout=timeout_seconds,
            result_ttl=ttl)
Пример #36
0
from redis import Redis
from rq_scheduler import Scheduler
from datetime import datetime

from rq import Queue
from rq.job import Job
from flask import request, render_template, url_for, Flask
from emailing.email_for_rq_testing import send_recommendation_email
from greting import print_greeting

from model import Book, User, Recommendation, UserBook, connect_to_db, db
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText

app = Flask(__name__)

scheduler = Scheduler(connection=Redis())

# First execution time, in UTC. Named `scheduled_start` rather than `time`
# so it does not shadow the stdlib `time` module.
scheduled_start = datetime(2016, 3, 3, 2, 10)

scheduler.schedule(
    scheduled_time=scheduled_start,  # Time for first execution, in UTC timezone
    func=send_recommendation_email,  # Function to be queued
    # args = ,
    interval=120,  # Time before the function is called again, in seconds
    repeat=5  # Repeat this number of times (None means repeat forever)
)

# 86400 seconds in a day
Пример #37
0
def main():
    """Parse CLI arguments and register a recurring reddit-fetch job.

    Reads/overrides the ``CS_ENV`` environment variable, then schedules one
    of the ``app.controller.fetch_*`` functions on an rq-scheduler queue
    named after the environment.  The job repeats forever (``repeat=None``);
    rescheduling relies on ``result_ttl`` outliving the interval.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "sub", help="The subreddit to query (or all for the frontpage)")

    parser.add_argument(
        "pagetype",
        choices=["new", "top", "contr", "hot", "comments", "modactions"],
        help="For front pages, what page to query")
    parser.add_argument(
        "interval",
        default=120,  # default 2 minutes
        help="Interval between tasks in seconds (default 2 minutes)")
    parser.add_argument(
        "-e",
        '--env',
        choices=['development', 'test', 'production'],
        required=False,
        help=
        "Run within a specific environment. Otherwise run under the environment defined in the environment variable CS_ENV"
    )

    args = parser.parse_args()

    # If the user specified the environment, override CS_ENV for this run.
    if args.env is not None:
        os.environ['CS_ENV'] = args.env

    scheduler = Scheduler(queue_name=os.environ['CS_ENV'], connection=Redis())

    page_type = args.pagetype.lower()
    interval = int(args.interval)  # convert once; reused below

    # Keep the job result alive for at least two days and always longer than
    # the interval (plus an hour of slack) so the job gets rescheduled.
    ttl = max(172800, interval + 3600)  ## 172800 = two days in seconds

    if args.sub == "all":
        scheduler.schedule(scheduled_time=datetime.utcnow(),
                           func=app.controller.fetch_reddit_front,
                           args=[getattr(PageType, args.pagetype.upper())],
                           interval=interval,
                           repeat=None,
                           result_ttl=ttl)
    elif page_type == "comments":
        scheduler.schedule(
            scheduled_time=datetime.utcnow(),
            func=app.controller.fetch_last_thousand_comments,
            args=[args.sub],
            interval=interval,
            repeat=None,
            result_ttl=ttl)
    elif page_type == "modactions":
        scheduler.schedule(scheduled_time=datetime.utcnow(),
                           func=app.controller.fetch_mod_action_history,
                           args=[args.sub],
                           interval=interval,
                           repeat=None,
                           result_ttl=ttl)
    else:
        scheduler.schedule(scheduled_time=datetime.utcnow(),
                           func=app.controller.fetch_subreddit_front,
                           args=[args.sub,
                                 getattr(PageType, args.pagetype.upper())],
                           interval=interval,
                           repeat=None,
                           result_ttl=ttl)
Пример #38
0
@app.route("/results/<job_key>", methods=['GET'])
def get_results(job_key):
    """Return the stored trip for a finished job, or 202 while it runs."""
    job = Job.fetch(job_key, connection=conn)

    # Guard clause: the job is still queued/running.
    if not job.is_finished:
        return "Nay!", 202

    result = GPSTrip.query.filter_by(id=job.result).first()
    return jsonify(result)


@app.route("/scheduler", methods=['GET'])
def monitorer():
    """Debug view: dump the jobs currently registered with the scheduler."""
    jobs = scheduler.get_jobs()
    print(jobs)
    return str(jobs)


if __name__ == '__main__':

    # Register dailyScrape to start immediately (UTC) and run twice,
    # 60 seconds apart, before starting the Flask dev server.
    scrape_job = scheduler.schedule(
        scheduled_time=datetime.utcnow(),  # Time for first execution, in UTC timezone
        func=dailyScrape,  # Function to be queued
        interval=60,  # Time before the function is called again, in seconds
        repeat=2,  # Repeat this number of times (None means repeat forever)
        meta={'foo': 'bar'}  # Arbitrary pickleable data on the job itself
    )
    print("Scheduler: " + scrape_job.get_id())
    app.run(debug=True)
Пример #39
0
import os
import redis

# from rq import Queue
from rq_scheduler import Scheduler
from datetime import datetime
from app import loop_script

redis_url = os.getenv('REDISCLOUD_URL', 'redis://localhost:6379')
conn = redis.from_url(redis_url)

# q = Queue(connection=conn)
# result = q.enqueue(loop_script)

scheduler = Scheduler(connection=conn)

# Cancel any jobs left over from a previous run so we never double-schedule.
# (The `if len(...) > 0` guard was redundant: iterating an empty job list is
# a no-op, and it cost an extra get_jobs() round-trip.)
for job in scheduler.get_jobs():
    scheduler.cancel(job)

# Run loop_script every 30 seconds, starting immediately, repeating forever.
scheduler.schedule(scheduled_time=datetime.utcnow(),
                   func=loop_script,
                   interval=30)

print(scheduler.get_jobs())
Пример #40
0
from datetime import datetime

from rq import Queue
from rq.job import Job
from flask import request, render_template, url_for, Flask
from emailing.email_for_rq_testing import send_recommendation_email
from greting import print_greeting

from model import Book, User, Recommendation, UserBook, connect_to_db, db
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText


app = Flask(__name__)


# NOTE(review): `Scheduler` and `Redis` are not imported in this snippet's
# own header — presumably provided elsewhere; verify before running.
scheduler = Scheduler(connection=Redis())

# First execution time, in UTC. Named `first_run` rather than `time` so it
# does not shadow the stdlib `time` module.
first_run = datetime(2016, 3, 3, 2, 10)

scheduler.schedule(
    scheduled_time=first_run, # Time for first execution, in UTC timezone
    func=send_recommendation_email,        # Function to be queued
    # args = ,
    interval=120,                   # Time before the function is called again, in seconds
    repeat=5                      # Repeat this number of times (None means repeat forever)
)


# 86400 seconds in a day