Example #1
def setup_tasks(scheduler: rq_scheduler.Scheduler):
    # scheduler.cron(
    #     '0 0 * * *',
    #     'app.tasks.drop_stale_submissions'
    # )

    scheduler.cron('* * * * *', 'app.tasks.create_protests')
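
These cron entries are only written to Redis; nothing fires until a scheduler process is polling. A minimal sketch of wiring setup_tasks to a scheduler and starting it (the Redis connection and default queue here are assumptions, not part of the original):

from redis import Redis
from rq import Queue
from rq_scheduler import Scheduler

redis_conn = Redis()
scheduler = Scheduler(queue=Queue(connection=redis_conn), connection=redis_conn)
setup_tasks(scheduler)
scheduler.run()  # poll Redis and enqueue due jobs; the `rqscheduler` CLI does the same
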
def add_scheduled_task(request):
    task = request.GET.get('task')
    cron_string = request.GET.get('cron_string')
    cron_string = urllib.parse.unquote(cron_string)  # Python 3: unquote lives in urllib.parse

    print(cron_string)

    queue = request.GET.get('queue')
    parameters = request.GET.get('parameters')
    scheduler = Scheduler(queue_name=queue, connection=tasks.redis_conn)

    if parameters:
        args = (parameters,)
    else:
        args = None

    print(tasks)

    scheduler.cron(
        cron_string=cron_string,   # A cron string (e.g. "0 0 * * 0")
        func=getattr(tasks, task),       # Function to be queued
        args=args,
        repeat=None,                      # Repeat this number of times (None means repeat forever)
        timeout=14400,
        queue_name=queue,
    )

    return HttpResponse('Success')
def test_schedule_jobs_bad_current(mocker, queue, jobid):
    sch = Scheduler(queue=queue, connection=queue.connection)
    id0 = 'jobid0'
    sch.cron('* * * * *', func=noop, id=id0, meta={})
    jobs.schedule_jobs(sch)
    assert jobid in sch
    assert id0 not in sch
    assert len(list(sch.get_jobs())) == 1
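
The test above relies on pytest fixtures (mocker, queue, jobid) and project helpers (noop, jobs.schedule_jobs) that are not shown here; a minimal stand-in for the noop callable, purely illustrative, could be:

def noop(*args, **kwargs):
    # placeholder task body; the real project's noop is not shown in this listing
    pass
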
Example #4
def schedule_demo_job():
    redis_conn = Redis(host=_redis_host, port=_redis_port, db=_redis_db)
    if _worker_queue is None or not _worker_queue.strip():
        worker_queue = 'async-jobs'
    else:
        worker_queue = _worker_queue

    scheduler = Scheduler(connection=redis_conn)
    scheduler.cron(cron_string="*/5 * * * *",
                   func="demo_job.run_demo_job",
                   repeat=None,
                   queue_name=worker_queue)

    return scheduler
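
The module-level settings (_redis_host, _redis_port, _redis_db, _worker_queue) come from configuration elsewhere in the original project; a hedged stand-in for running the snippet locally might be:

_redis_host = 'localhost'  # assumption: local Redis instance
_redis_port = 6379
_redis_db = 0
_worker_queue = None       # empty/None falls back to 'async-jobs' above
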
def run_schedule(dev_id, dev_state, dev_time):
    # import ipdb
    # ipdb.set_trace()
    scheduler = Scheduler(connection=Redis())  # Get a scheduler for the "default" queue

    scheduler.cron(
        dev_time,                  # A cron string (e.g. "0 0 * * 0")
        func=create_log,           # Function to be queued
        args=[dev_id, dev_state],
        repeat=None,               # Repeat this number of times (None means repeat forever)
    )
    return dev_id
def set_schedule(conn, task, cron_string):

    s = Scheduler(connection=conn)
    kw = json.loads(task.kwargs)

    kw.update({
        "site": frappe.local.site,
        "user": frappe.session.user
    })

    job = s.cron(
        id=task.job_id,
        description=task.title,
        # A cron string (e.g. "0 0 * * 0")
        cron_string=cron_string,
        # Function to be queued
        func=task.method,
        # Arguments passed into function when executed
        # args=[task.args],
        # Keyword arguments passed into function when executed
        kwargs=kw,
        # Repeat this number of times (None means repeat forever)
        repeat=None,
        # In which queue the job should be put in
        queue_name=task.queue
    )
    print " ** scheduled in '" + task.queue + "' queue, with id: " + job.get_id() + " at " + cron_string

    return job.get_id()
Example #7
class RqClient:

    def __init__(self, conf: RqConfig, prefix: str = ''):
        self.redis_conn = Redis(host=conf.HOST, port=conf.PORT, db=conf.DB)
        self.queue = Queue(connection=self.redis_conn)
        self.prefix = prefix
        self.scheduler = Scheduler(connection=self.redis_conn, queue=self.queue)
        self.scheduler_conf_path = conf.SCHEDULER_CONF_PATH
        self.control = Control(self.redis_conn)

    def init_scheduler(self):
        # remove old scheduled tasks
        for job in self.scheduler.get_jobs():
            self.scheduler.cancel(job)

        # create new tasks from config file
        if self.scheduler_conf_path:
            with open(self.scheduler_conf_path) as f:
                for entry in json.load(f):
                    self.scheduler.cron(
                        entry['schedule'],
                        f'{self.prefix}.{entry["task"]}',
                        kwargs=entry['kwargs'] if 'kwargs' in entry else None
                    )

    def send_task(self, name, args=None, time_limit=None, soft_time_limit=None):
        try:
            job = self.queue.enqueue(f'{self.prefix}.{name}', ttl=time_limit, args=args)
            return ResultWrapper(job)
        except Exception as ex:
            logging.getLogger(__name__).error(ex)

    def AsyncResult(self, ident):
        try:
            return ResultWrapper(Job.fetch(ident, connection=self.redis_conn))
        except NoSuchJobError:
            logging.getLogger(__name__).warning(f'Job {ident} not found')
            return None
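
init_scheduler expects the file at conf.SCHEDULER_CONF_PATH to hold a JSON list of entries with 'schedule', 'task' and optional 'kwargs' keys; a hedged sketch of generating such a file (the task names and path are placeholders):

import json

entries = [
    {"schedule": "0 3 * * *", "task": "cleanup_old_results"},
    {"schedule": "*/10 * * * *", "task": "refresh_cache", "kwargs": {"ttl": 600}},
]
with open("scheduler_conf.json", "w") as f:
    json.dump(entries, f)
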
Example #8
def enqueue_job(task, job, image, command, start_at, start_in, cron, logger):
    scheduler = Scheduler("jobs", connection=current_app.redis)

    args = [task.task_id, str(job.id), image, command]

    queue_job_id = None

    if start_at is not None:
        dt = datetime.utcfromtimestamp(int(start_at))
        logger.debug("Enqueuing job execution in the future...", start_at=dt)
        result = scheduler.enqueue_at(dt, run_job, *args)
        job.metadata["enqueued_id"] = str(result.id)
        queue_job_id = str(result.id)
        job.save()
        logger.info("Job execution enqueued successfully.", start_at=dt)
    elif start_in is not None:
        dt = datetime.now(tz=timezone.utc) + start_in
        logger.debug("Enqueuing job execution in the future...", start_at=dt)
        result = scheduler.enqueue_at(dt, run_job, *args)
        job.metadata["enqueued_id"] = str(result.id)
        queue_job_id = str(result.id)
        job.save()
        logger.info("Job execution enqueued successfully.", start_at=dt)
    elif cron is not None:
        logger.debug("Enqueuing job execution using cron...", cron=cron)
        result = scheduler.cron(
            cron,  # A cron string (e.g. "0 0 * * 0")
            func=run_job,
            args=args,
            repeat=None,
            queue_name="jobs",
        )
        job.metadata["enqueued_id"] = str(result.id)
        queue_job_id = str(result.id)
        job.metadata["cron"] = cron
        job.scheduled = True
        job.save()
        logger.info("Job execution enqueued successfully.", cron=cron)
    else:
        logger.debug("Enqueuing job execution...")
        result = current_app.job_queue.enqueue(run_job, *args, timeout=-1)
        queue_job_id = result.id
        job.metadata["enqueued_id"] = result.id
        job.save()
        logger.info("Job execution enqueued successfully.")

    return queue_job_id
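
Because every branch stores the returned job id in job.metadata["enqueued_id"], a scheduled (but not yet executed) run can be removed again later. A hedged sketch of such a helper (the unschedule_job name is an assumption, not part of the original):

def unschedule_job(job):
    scheduler = Scheduler("jobs", connection=current_app.redis)
    enqueued_id = job.metadata.get("enqueued_id")
    if enqueued_id is not None:
        # rq-scheduler's cancel() accepts either a Job instance or a job id
        scheduler.cancel(enqueued_id)
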
Example #9
def start_scheduler(redis_url, redis_password=None, queue_name='job_scheduler_queue'):
    queue = redis_queue(redis_url, redis_password, queue_name)
    scheduler = Scheduler(queue_name=queue.name, connection=queue.connection)

    queue.empty()
    for job in scheduler.get_jobs():
        scheduler.cancel(job)
        logger.info(f"Removed old job {job} from scheduler.")

    # add jobs to scheduler
    job = scheduler.cron(
        cron_string="* * * * *",  # once a minute
        func=log_review,
        args=[datetime.now(), choice(['Alice', 'Bob', 'Carol', 'Dave'])],
        queue_name=queue.name,
        repeat=None
    )
    logger.info(f"Added job {job}")

    return scheduler
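
redis_queue and log_review are helpers from the same project and are not shown here; a minimal log_review stand-in, purely for illustration, could be:

def log_review(scheduled_at, reviewer):
    # assumption: the real task does more than write a log line
    logger.info(f"review scheduled at {scheduled_at} assigned to {reviewer}")
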
Example #10
    def __add_to_scheduler(self, crons: List[str]):
        rq_job_ids = []
        if crons:
            queue = Queue(SchedulerDB.submit_queue_name,
                          connection=self.con,
                          default_timeout=self.timeout)
            scheduler = Scheduler(queue=queue, connection=self.con)
            for cron in crons:
                rq_job = scheduler.cron(
                    cron,
                    func=self.submit,
                    # args=self.args,
                    # kwargs=self.kwargs,
                    repeat=None,
                    queue_name=queue.name,
                    timeout=self.timeout,
                )
                self.logger.debug(
                    f"{self} with cron '{cron}' has been added to rq scheduler with id {rq_job.id}"
                )
                rq_job_ids.append(rq_job.id)
            self.logger.debug(f"{self} has been added to the rq scheduler")

        return rq_job_ids
Example #11
class RqClient:
    def __init__(self, conf: RqConfig, prefix: str = ''):
        self.redis_conn = Redis(host=conf.HOST, port=conf.PORT, db=conf.DB)
        self.queue = Queue(connection=self.redis_conn)
        self.prefix = prefix
        self.scheduler = Scheduler(connection=self.redis_conn,
                                   queue=self.queue)
        self.scheduler_conf_path = conf.SCHEDULER_CONF_PATH
        self.control = Control(self.redis_conn)

    def init_scheduler(self):
        # remove old scheduled tasks
        for job in self.scheduler.get_jobs():
            self.scheduler.cancel(job)

        # create new tasks from config file
        if self.scheduler_conf_path:
            with open(self.scheduler_conf_path) as f:
                for entry in json.load(f):
                    self.scheduler.cron(
                        entry['schedule'],
                        f'{self.prefix}.{entry["task"]}',
                        kwargs=entry['kwargs'] if 'kwargs' in entry else None)

    @staticmethod
    def _resolve_limit(softl, hardl):
        if softl is not None and hardl is not None:
            return min(softl, hardl)
        elif softl is not None:
            return softl
        elif hardl is not None:
            return hardl
        return None

    def send_task(self,
                  name,
                  args=None,
                  time_limit=None,
                  soft_time_limit=None):
        """
        Send a task to the worker.

        Please note that RQ does not distinguish between hard and soft time
        limits. If both values (time_limit, soft_time_limit) are given, the
        smaller one is used; otherwise, whichever is not None is applied.
        """
        time_limit = self._resolve_limit(time_limit, soft_time_limit)
        try:
            job = self.queue.enqueue(f'{self.prefix}.{name}',
                                     job_timeout=time_limit,
                                     args=args)
            return ResultWrapper(job)
        except Exception as ex:
            logging.getLogger(__name__).error(ex)

    def AsyncResult(self, ident):
        try:
            return ResultWrapper(Job.fetch(ident, connection=self.redis_conn))
        except NoSuchJobError:
            logging.getLogger(__name__).warning(f'Job {ident} not found')
            return None
Example #12
class RqClient(AbstractBgClient):
    def __init__(self, conf: RqConfig, prefix: str = ''):
        self.redis_conn = Redis(host=conf.HOST, port=conf.PORT, db=conf.DB)
        self.queue = Queue(connection=self.redis_conn)
        self.prefix = prefix
        self.scheduler = Scheduler(connection=self.redis_conn,
                                   queue=self.queue)
        self.scheduler_conf_path = conf.SCHEDULER_CONF_PATH
        self._control = Control(self.redis_conn)

    def init_scheduler(self):
        # remove old scheduled tasks
        for job in self.scheduler.get_jobs():
            self.scheduler.cancel(job)

        # create new tasks from config file
        if self.scheduler_conf_path:
            with open(self.scheduler_conf_path) as f:
                for entry in json.load(f):
                    self.scheduler.cron(
                        entry['schedule'],
                        f'{self.prefix}.{entry["task"]}',
                        kwargs=entry['kwargs'] if 'kwargs' in entry else None,
                        use_local_timezone=True,
                    )
            logging.getLogger(__name__).info(
                f'Loaded configuration for Rq-scheduler from {self.scheduler_conf_path}'
            )
        else:
            logging.getLogger(__name__).warning(
                'No Rq-scheduler configuration path defined. '
                'Regular system maintenance will be disabled which may lead to disks becoming full.'
            )

    @staticmethod
    def _resolve_limit(softl, hardl):
        if softl is not None and hardl is not None:
            return min(softl, hardl)
        elif softl is not None:
            return softl
        elif hardl is not None:
            return hardl
        return None

    @property
    def control(self):
        return self._control

    def send_task(self,
                  name,
                  ans_type: Type[T],
                  args=None,
                  time_limit=None,
                  soft_time_limit=None) -> ResultWrapper[T]:
        """
        Send a task to the worker.

        Please note that RQ does not distinguish between hard and soft time
        limits. If both values (time_limit, soft_time_limit) are given, the
        smaller one is used; otherwise, whichever is not None is applied.
        """
        time_limit = self._resolve_limit(time_limit, soft_time_limit)
        try:
            job = self.queue.enqueue(f'{self.prefix}.{name}',
                                     job_timeout=time_limit,
                                     args=args)
            return ResultWrapper(job)
        except Exception as ex:
            logging.getLogger(__name__).error(ex)

    def get_task_error(self, task_id):
        try:
            job = Job.fetch(task_id, connection=self.redis_conn)
            if job.get_status() == 'failed':
                return BgCalcError(job.exc_info)
        except NoSuchJobError as ex:
            return CalcTaskNotFoundError(ex)
        return None

    def AsyncResult(self, ident):
        try:
            return ResultWrapper(Job.fetch(ident, connection=self.redis_conn))
        except NoSuchJobError:
            logging.getLogger(__name__).warning(f'Job {ident} not found')
            return None

    def is_wrapped_user_error(self, err):
        return isinstance(err, UserActionException)
Example #13
def create_task(task_id):
    details = get_details()

    if details is None or details == "":
        msg = "Failed to enqueue task because JSON body could not be parsed."
        g.logger.warn(msg)

        return make_response(msg, 400)

    image = details.get("image", None)
    command = details.get("command", None)

    if image is None or command is None:
        return make_response("image and command must be filled in the request.", 400)

    logger = g.logger.bind(task_id=task_id, image=image, command=command)

    logger.debug("Creating task...")
    task = Task.objects(task_id=task_id).modify(task_id=task_id, upsert=True, new=True)
    logger.info("Task created successfully.")

    logger.debug("Creating job...")
    retries = details.get("retries", 0)
    expiration = details.get("expiration")

    hard_limit = current_app.config["HARD_EXECUTION_TIMEOUT_SECONDS"]
    timeout = details.get("timeout", hard_limit)
    timeout = min(timeout, hard_limit)  # ensure jobs can't specify more than the hard limit

    j = task.create_job()
    j.metadata["retries"] = retries
    j.metadata["retry_count"] = 0
    j.metadata["expiration"] = expiration
    j.metadata["timeout"] = timeout
    j.metadata["envs"] = details.get("envs", {})
    j.save()
    job_id = str(j.id)
    logger.debug("Job created successfully...", job_id=job_id)

    queue_job_id = None

    start_at = details.get("startAt", None)
    start_in = parse_time(details.get("startIn", None))
    cron = details.get("cron", None)
    scheduler = Scheduler("jobs", connection=current_app.redis)

    args = [task_id, job_id, image, command]

    if start_at is not None:
        dt = datetime.utcfromtimestamp(int(start_at))
        logger.debug("Enqueuing job execution in the future...", start_at=dt)
        result = scheduler.enqueue_at(dt, run_job, *args)
        j.metadata["enqueued_id"] = result.id
        j.save()
        logger.info("Job execution enqueued successfully.", start_at=dt)
    elif start_in is not None:
        dt = datetime.now(tz=timezone.utc) + start_in
        logger.debug("Enqueuing job execution in the future...", start_at=dt)
        result = scheduler.enqueue_at(dt, run_job, *args)
        j.metadata["enqueued_id"] = result.id
        j.save()
        logger.info("Job execution enqueued successfully.", start_at=dt)
    elif cron is not None:
        logger.debug("Enqueuing job execution using cron...", cron=cron)
        result = scheduler.cron(
            cron,  # A cron string (e.g. "0 0 * * 0")
            func=run_job,
            args=args,
            repeat=None,
            queue_name="jobs",
        )
        j.metadata["enqueued_id"] = result.id
        j.metadata["cron"] = cron
        j.scheduled = True
        j.save()
        logger.info("Job execution enqueued successfully.", cron=cron)
    else:
        logger.debug("Enqueuing job execution...")
        result = current_app.job_queue.enqueue(run_job, *args, timeout=-1)
        queue_job_id = result.id
        j.metadata["enqueued_id"] = result.id
        j.save()
        logger.info("Job execution enqueued successfully.")

    job_url = url_for("task.get_job", task_id=task_id, job_id=job_id, _external=True)
    task_url = url_for("task.get_task", task_id=task_id, _external=True)

    return jsonify(
        {
            "taskId": task_id,
            "jobId": job_id,
            "queueJobId": queue_job_id,
            "jobUrl": job_url,
            "taskUrl": task_url,
        }
    )
Example #14
    setup_template_task,
)

if 'generate_pool_cache' not in scheduler:
    logging.info('adding generate pool cache task to scheduler')
    scheduler.schedule(
        id='generate_pool_cache',
        scheduled_time=datetime.datetime.utcnow(),
        func=generate_pool_cache_task,
        interval=90,
    )

if 'process_expiring_vms' not in scheduler:
    logging.info('adding process expiring VMs task to scheduler')
    scheduler.cron('0 5 * * *',
                   id='process_expiring_vms',
                   func=process_expiring_vms_task)

if 'cleanup_vnc' not in scheduler:
    logging.info('adding cleanup VNC task to scheduler')
    scheduler.schedule(
        id='cleanup_vnc',
        scheduled_time=datetime.datetime.utcnow(),
        func=cleanup_vnc_task,
        interval=3600,
    )


def add_rq_dashboard_auth(blueprint):
    @blueprint.before_request
    @auth.oidc_auth
Example #15
sqla.init_app(app)
mongo.init_app(app)

cred = credentials.Certificate(os.getenv('FIREBASE_CERT'))

try:
    firebase_admin.initialize_app(credential=cred)
except Exception:
    firebase_admin.get_app()

# preventing race condition
with Connection():
    q = Queue(connection=conn)
    scheduler = Scheduler(queue=q)  # rq scheduler
    # do some cron
    scheduler.cron(
        '0 */2 * * *',  # A cron string: at minute 0 every 2 hours, in UTC
        func=run,  # Function to be queued
        # Queue the job should be put in
        queue_name='Translink-Cron',
        # Repeat this number of times (None means repeat forever)
        repeat=2)
    print(scheduler.get_jobs())


@app.errorhandler(AuthError)
def handle_auth_error(ex):
    response = jsonify(ex.error)
    response.status_code = ex.status_code
    return response
from redis import Redis
from rq_scheduler import Scheduler
from datetime import datetime
from loguru import logger

from config import Config
from database import Post, Session

if __name__ == "__main__":
    redis_conn = Redis(
        host=Config.REDIS_HOST,
        port=Config.REDIS_PORT,
        db=Config.REDIS_DB,
    )
    scheduler = Scheduler(connection=redis_conn)

    # clean old jobs (in case of restart etc)
    for job in scheduler.get_jobs():
        scheduler.cancel(job)

    scheduler.cron(
        Config.PARSER_SCHEDULE_STRING,
        func=Post.fetch_all,
        args=[Config],
        kwargs={},
        repeat=None,  # repeat forever
        queue_name="default",
        meta={})

    logger.info(
        f"parser job issued with schedule: {Config.PARSER_SCHEDULE_STRING}")
class TestScheduler(RQTestCase):
    def setUp(self):
        super(TestScheduler, self).setUp()
        self.scheduler = Scheduler(connection=self.testconn)

    def test_acquire_lock(self):
        """
        When the scheduler acquires a lock, besides creating a key, it should
        also set an expiry that's a few seconds longer than its polling
        interval so it automatically expires if the scheduler is unexpectedly
        terminated.
        """
        key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=20)
        self.assertTrue(scheduler.acquire_lock())
        self.assertIn(key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(key), 30)
        scheduler.remove_lock()
        self.assertNotIn(key, tl(self.testconn.keys('*')))

    def test_no_two_schedulers_acquire_lock(self):
        """
        Ensure that no two schedulers can acquire the lock at the
        same time. When removing the lock, only the scheduler which
        originally acquired the lock can remove the lock.
        """
        key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler1 = Scheduler(connection=self.testconn, interval=20)
        scheduler2 = Scheduler(connection=self.testconn, interval=20)
        self.assertTrue(scheduler1.acquire_lock())
        self.assertFalse(scheduler2.acquire_lock())
        self.assertIn(key, tl(self.testconn.keys('*')))
        scheduler2.remove_lock()
        self.assertIn(key, tl(self.testconn.keys('*')))
        scheduler1.remove_lock()
        self.assertNotIn(key, tl(self.testconn.keys('*')))

    def test_create_job(self):
        """
        Ensure that jobs are created properly.
        """
        job = self.scheduler._create_job(say_hello, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job, job_from_queue)
        self.assertEqual(job_from_queue.func, say_hello)

    def test_create_job_with_ttl(self):
        """
        Ensure that TTL is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, ttl=2, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(2, job_from_queue.ttl)

    def test_create_job_with_id(self):
        """
        Ensure that ID is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello,
                                         id='id test',
                                         args=(),
                                         kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('id test', job_from_queue.id)

    def test_create_job_with_description(self):
        """
        Ensure that description is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello,
                                         description='description',
                                         args=(),
                                         kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('description', job_from_queue.description)

    def test_create_job_with_timeout(self):
        """
        Ensure that timeout is passed to RQ.
        """
        timeout = 13
        job = self.scheduler._create_job(say_hello,
                                         timeout=13,
                                         args=(),
                                         kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(timeout, job_from_queue.timeout)

    def test_job_not_persisted_if_commit_false(self):
        """
        Ensure jobs are only saved to Redis if commit=True.
        """
        job = self.scheduler._create_job(say_hello, commit=False)
        self.assertEqual(self.testconn.hgetall(job.key), {})

    def test_create_scheduled_job(self):
        """
        Ensure that scheduled jobs are put in the scheduler queue with the right score
        """
        scheduled_time = datetime.utcnow()
        job = self.scheduler.enqueue_at(scheduled_time, say_hello)
        self.assertEqual(job, Job.fetch(job.id, connection=self.testconn))
        self.assertIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            to_unix(scheduled_time))

    def test_create_job_with_meta(self):
        """
        Ensure that meta information on the job is passed to rq
        """
        expected = {'say': 'hello'}
        job = self.scheduler._create_job(say_hello, meta=expected)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(expected, job_from_queue.meta)

    def test_enqueue_at_sets_timeout(self):
        """
        Ensure that a job scheduled via enqueue_at can be created with
        a custom timeout.
        """
        timeout = 13
        job = self.scheduler.enqueue_at(datetime.utcnow(),
                                        say_hello,
                                        timeout=timeout)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.timeout, timeout)

    def test_enqueue_at_sets_job_id(self):
        """
        Ensure that a job scheduled via enqueue_at can be created with
        a custom job id.
        """
        job_id = 'test_id'
        job = self.scheduler.enqueue_at(datetime.utcnow(),
                                        say_hello,
                                        job_id=job_id)
        self.assertEqual(job.id, job_id)

    def test_enqueue_at_sets_job_ttl(self):
        """
        Ensure that a job scheduled via enqueue_at can be created with a custom job ttl.
        """
        job_ttl = 123456789
        job = self.scheduler.enqueue_at(datetime.utcnow(),
                                        say_hello,
                                        job_ttl=job_ttl)
        self.assertEqual(job.ttl, job_ttl)

    def test_enqueue_at_sets_job_result_ttl(self):
        """
        Ensure that a job scheduled via enqueue_at can be created with a custom result ttl.
        """
        job_result_ttl = 1234567890
        job = self.scheduler.enqueue_at(datetime.utcnow(),
                                        say_hello,
                                        job_result_ttl=job_result_ttl)
        self.assertEqual(job.result_ttl, job_result_ttl)

    def test_enqueue_at_sets_meta(self):
        """
        Ensure that a job scheduled via enqueue_at can be created with a custom meta.
        """
        meta = {'say': 'hello'}
        job = self.scheduler.enqueue_at(datetime.utcnow(),
                                        say_hello,
                                        meta=meta)
        self.assertEqual(job.meta, meta)

    def test_enqueue_in(self):
        """
        Ensure that jobs have the right scheduled time.
        """
        right_now = datetime.utcnow()
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            to_unix(right_now + time_delta))
        time_delta = timedelta(hours=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            to_unix(right_now + time_delta))

    def test_enqueue_in_sets_timeout(self):
        """
        Ensure that a job scheduled via enqueue_in can be created with
        a custom timeout.
        """
        timeout = 13
        job = self.scheduler.enqueue_in(timedelta(minutes=1),
                                        say_hello,
                                        timeout=timeout)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.timeout, timeout)

    def test_enqueue_in_sets_job_id(self):
        """
        Ensure that a job scheduled via enqueue_in can be created with
        a custom job id.
        """
        job_id = 'test_id'
        job = self.scheduler.enqueue_in(timedelta(minutes=1),
                                        say_hello,
                                        job_id=job_id)
        self.assertEqual(job.id, job_id)

    def test_enqueue_in_sets_job_ttl(self):
        """
        Ensure that a job scheduled via enqueue_in can be created with a custom job ttl.
        """
        job_ttl = 123456789
        job = self.scheduler.enqueue_in(timedelta(minutes=1),
                                        say_hello,
                                        job_ttl=job_ttl)
        self.assertEqual(job.ttl, job_ttl)

    def test_enqueue_in_sets_job_result_ttl(self):
        """
        Ensure that a job scheduled via enqueue_in can be created with a custom result ttl.
        """
        job_result_ttl = 1234567890
        job = self.scheduler.enqueue_in(timedelta(minutes=1),
                                        say_hello,
                                        job_result_ttl=job_result_ttl)
        self.assertEqual(job.result_ttl, job_result_ttl)

    def test_enqueue_in_sets_meta(self):
        """
        Ensure that a job scheduled via enqueue_in sets meta.
        """
        meta = {'say': 'hello'}
        job = self.scheduler.enqueue_in(timedelta(minutes=1),
                                        say_hello,
                                        meta=meta)
        self.assertEqual(job.meta, meta)

    def test_count(self):
        now = datetime.utcnow()
        self.scheduler.enqueue_at(now, say_hello)
        self.assertEqual(self.scheduler.count(), 1)

        future_time = now + timedelta(hours=1)
        future_test_time = now + timedelta(minutes=59, seconds=59)

        self.scheduler.enqueue_at(future_time, say_hello)

        self.assertEqual(
            self.scheduler.count(timedelta(minutes=59, seconds=59)), 1)
        self.assertEqual(self.scheduler.count(future_test_time), 1)
        self.assertEqual(self.scheduler.count(), 2)

    def test_get_jobs(self):
        """
        Ensure get_jobs() returns all jobs until the specified time.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs(now))
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertIn(job,
                      self.scheduler.get_jobs(timedelta(hours=1, seconds=1)))
        self.assertIn(job,
                      [j[0] for j in self.scheduler.get_jobs(with_times=True)])
        self.assertIsInstance(
            list(self.scheduler.get_jobs(with_times=True))[0][1], datetime)
        self.assertNotIn(
            job, self.scheduler.get_jobs(timedelta(minutes=59, seconds=59)))

    def test_get_jobs_slice(self):
        """
        Ensure get_jobs() returns the appropriate slice of all jobs using offset and length.
        """
        now = datetime.utcnow()
        future_time = now + timedelta(hours=1)
        future_test_time = now + timedelta(minutes=59, seconds=59)

        # Schedule each job a second later than the previous job,
        # otherwise Redis will return jobs that have the same scheduled time in
        # lexicographical order (not the order in which we enqueued them)
        now_jobs = [
            self.scheduler.enqueue_at(now + timedelta(seconds=x), say_hello)
            for x in range(15)
        ]
        future_jobs = [
            self.scheduler.enqueue_at(future_time + timedelta(seconds=x),
                                      say_hello) for x in range(15)
        ]

        # last 10 from now_jobs and first 10 from future_jobs
        expected_slice = now_jobs[5:] + future_jobs[:10]
        expected_until_slice = now_jobs[5:]  # last 10 from now_jobs

        jobs = self.scheduler.get_jobs()
        jobs_slice = self.scheduler.get_jobs(offset=5, length=20)
        jobs_until_slice = self.scheduler.get_jobs(future_test_time,
                                                   offset=5,
                                                   length=20)

        self.assertEqual(now_jobs + future_jobs, list(jobs))
        self.assertEqual(expected_slice, list(jobs_slice))
        self.assertEqual(expected_until_slice, list(jobs_until_slice))

    def test_get_jobs_to_queue(self):
        """
        Ensure that jobs scheduled in the future are not queued.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertNotIn(job, self.scheduler.get_jobs_to_queue())

    def test_enqueue_job(self):
        """
        When scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        - Queue is recognized by rq's Queue.all()
        """
        now = datetime.utcnow()
        queue_name = 'foo'
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)

        job = scheduler.enqueue_at(now, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(
            job, tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10)))
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)
        self.assertIn(queue, Queue.all())

    def test_enqueue_job_with_scheduler_queue(self):
        """
        Ensure that job is enqueued correctly when the scheduler is bound
        to a queue object and job queue name is not provided.
        """
        queue = Queue('foo', connection=self.testconn)
        scheduler = Scheduler(connection=self.testconn, queue=queue)
        job = scheduler._create_job(say_hello)
        scheduler_queue = scheduler.get_queue_for_job(job)
        self.assertEqual(queue, scheduler_queue)
        scheduler.enqueue_job(job)
        self.assertTrue(job.enqueued_at is not None)
        self.assertIn(job, queue.jobs)
        self.assertIn(queue, Queue.all())

    def test_enqueue_job_with_job_queue_name(self):
        """
        Ensure that job is enqueued correctly when queue_name is provided
        at job creation
        """
        queue = Queue('foo', connection=self.testconn)
        job_queue = Queue('job_foo', connection=self.testconn)
        scheduler = Scheduler(connection=self.testconn, queue=queue)
        job = scheduler._create_job(say_hello, queue_name='job_foo')
        self.assertEqual(scheduler.get_queue_for_job(job), job_queue)
        scheduler.enqueue_job(job)
        self.assertTrue(job.enqueued_at is not None)
        self.assertIn(job, job_queue.jobs)
        self.assertIn(job_queue, Queue.all())

    def test_enqueue_at_with_job_queue_name(self):
        """
        Ensure that job is enqueued correctly when queue_name is provided
        to enqueue_at
        """
        queue = Queue('foo', connection=self.testconn)
        job_queue = Queue('job_foo', connection=self.testconn)
        scheduler = Scheduler(connection=self.testconn, queue=queue)
        job = scheduler.enqueue_at(datetime.utcnow(),
                                   say_hello,
                                   queue_name='job_foo')
        self.assertEqual(scheduler.get_queue_for_job(job), job_queue)
        self.scheduler.enqueue_job(job)
        self.assertTrue(job.enqueued_at is not None)
        self.assertIn(job, job_queue.jobs)
        self.assertIn(job_queue, Queue.all())

    def test_job_membership(self):
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler)
        self.assertIn(job.id, self.scheduler)
        self.assertNotIn("non-existing-job-id", self.scheduler)

    def test_cancel_scheduled_job(self):
        """
        When scheduled job is canceled, make sure:
        - Job is removed from the sorted set of scheduled jobs
        """
        # schedule a job to be enqueued one minute from now
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        # cancel the scheduled job and check that it's gone from the set
        self.scheduler.cancel(job)
        self.assertNotIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_change_execution_time(self):
        """
        Ensure that calling ``change_execution_time`` updates the job's score.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
        new_date = datetime(2010, 1, 1)
        self.scheduler.change_execution_time(job, new_date)
        self.assertEqual(
            to_unix(new_date),
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))
        self.scheduler.cancel(job)
        self.assertRaises(ValueError, self.scheduler.change_execution_time,
                          job, new_date)

    def test_args_kwargs_are_passed_correctly(self):
        """
        Ensure that arguments and keyword arguments are properly saved to jobs.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1,
                                        1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_at(datetime.utcnow(),
                                        simple_addition,
                                        z=1,
                                        y=1,
                                        x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_at(datetime.utcnow(),
                                        simple_addition,
                                        1,
                                        z=1,
                                        y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1, ))

        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_in(time_delta,
                                        simple_addition,
                                        z=1,
                                        y=1,
                                        x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_in(time_delta,
                                        simple_addition,
                                        1,
                                        z=1,
                                        y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1, ))

    def test_interval_and_repeat_persisted_correctly(self):
        """
        Ensure that interval and repeat attributes are correctly saved.
        """
        job = self.scheduler.schedule(datetime.utcnow(),
                                      say_hello,
                                      interval=10,
                                      repeat=11)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['interval'], 10)
        self.assertEqual(job_from_queue.meta['repeat'], 11)

    def test_crontab_persisted_correctly(self):
        """
        Ensure that crontab attribute gets correctly saved in Redis.
        """
        # create a job that runs one minute past each whole hour
        job = self.scheduler.cron("1 * * * *", say_hello)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['cron_string'], "1 * * * *")

        # get the scheduled_time and convert it to a datetime object
        unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key,
                                         job.id)
        datetime_time = from_unix(unix_time)

        # check that minute=1, seconds=0, and is within an hour
        assert datetime_time.minute == 1
        assert datetime_time.second == 0
        assert datetime_time - datetime.utcnow() < timedelta(hours=1)

    def test_crontab_persisted_correctly_with_local_timezone(self):
        """
        Ensure that crontab attribute gets correctly saved in Redis when using local TZ.
        """
        # create a job that runs at 15:00 local time every day
        job = self.scheduler.cron("0 15 * * *",
                                  say_hello,
                                  use_local_timezone=True)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['cron_string'], "0 15 * * *")

        # get the scheduled_time and convert it to a datetime object
        unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key,
                                         job.id)
        datetime_time = from_unix(unix_time)

        expected_datetime_in_local_tz = datetime.now(
            get_utc_timezone()).replace(hour=15,
                                        minute=0,
                                        second=0,
                                        microsecond=0)
        assert datetime_time.time() == expected_datetime_in_local_tz.astimezone(
            get_utc_timezone()).time()

    def test_crontab_rescheduled_correctly_with_local_timezone(self):
        # Create a job with a cronjob_string
        job = self.scheduler.cron("1 15 * * *",
                                  say_hello,
                                  use_local_timezone=True)

        # change crontab
        job.meta['cron_string'] = "2 15 * * *"

        # reenqueue the job
        self.scheduler.enqueue_job(job)

        # get the scheduled_time and convert it to a datetime object
        unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key,
                                         job.id)
        datetime_time = from_unix(unix_time)

        expected_datetime_in_local_tz = datetime.now(
            get_utc_timezone()).replace(hour=15,
                                        minute=2,
                                        second=0,
                                        microsecond=0)
        assert datetime_time.time() == expected_datetime_in_local_tz.astimezone(
            get_utc_timezone()).time()

    def test_crontab_sets_timeout(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom timeout.
        """
        timeout = 13
        job = self.scheduler.cron("1 * * * *", say_hello, timeout=timeout)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.timeout, timeout)

    def test_crontab_sets_id(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom id
        """
        job_id = "hello-job-id"
        job = self.scheduler.cron("1 * * * *", say_hello, id=job_id)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_id, job_from_queue.id)

    def test_crontab_sets_default_result_ttl(self):
        """
        Ensure that a job scheduled via crontab gets the proper default
        result_ttl (-1) for periodic tasks.
        """
        job = self.scheduler.cron("1 * * * *", say_hello)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(-1, job_from_queue.result_ttl)

    def test_crontab_sets_description(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom description
        """
        description = 'test description'
        job = self.scheduler.cron("1 * * * *",
                                  say_hello,
                                  description=description)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(description, job_from_queue.description)

    def test_repeat_without_interval_raises_error(self):
        # Ensure that an error is raised if repeat is specified without interval
        def create_job():
            self.scheduler.schedule(datetime.utcnow(), say_hello, repeat=11)

        self.assertRaises(ValueError, create_job)

    def test_job_with_intervals_get_rescheduled(self):
        """
        Ensure jobs with interval attribute are put back in the scheduler
        """
        time_now = datetime.utcnow()
        interval = 10
        job = self.scheduler.schedule(time_now, say_hello, interval=interval)
        self.scheduler.enqueue_job(job)
        self.assertIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            to_unix(time_now) + interval)

    def test_job_with_interval_can_set_meta(self):
        """
        Ensure that jobs with interval attribute can be created with meta
        """
        time_now = datetime.utcnow()
        interval = 10
        meta = {'say': 'hello'}
        job = self.scheduler.schedule(time_now,
                                      say_hello,
                                      interval=interval,
                                      meta=meta)
        self.scheduler.enqueue_job(job)
        self.assertEqual(job.meta, meta)

    def test_job_with_crontab_get_rescheduled(self):
        # Create a job with a cronjob_string
        job = self.scheduler.cron("1 * * * *", say_hello)

        # current unix_time
        old_next_scheduled_time = self.testconn.zscore(
            self.scheduler.scheduled_jobs_key, job.id)

        # change crontab
        job.meta['cron_string'] = "2 * * * *"

        # enqueue the job
        self.scheduler.enqueue_job(job)

        self.assertIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # check that next scheduled time has changed
        self.assertNotEqual(
            old_next_scheduled_time,
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))

        # check that new next scheduled time is set correctly
        expected_next_scheduled_time = to_unix(
            get_next_scheduled_time("2 * * * *"))
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            expected_next_scheduled_time)

    def test_job_with_repeat(self):
        """
        Ensure jobs with repeat attribute are put back in the scheduler
        X (repeat) number of times
        """
        time_now = datetime.utcnow()
        interval = 10
        # If job is repeated once, the job shouldn't be put back in the queue
        job = self.scheduler.schedule(time_now,
                                      say_hello,
                                      interval=interval,
                                      repeat=1)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.schedule(time_now,
                                      say_hello,
                                      interval=interval,
                                      repeat=2)
        self.scheduler.enqueue_job(job)
        self.assertIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_missing_jobs_removed_from_scheduler(self):
        """
        Ensure jobs that don't exist when queued are removed from the scheduler.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello)
        job.cancel()
        list(self.scheduler.get_jobs_to_queue())
        self.assertIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        job.delete()
        list(self.scheduler.get_jobs_to_queue())
        self.assertNotIn(
            job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_periodic_jobs_sets_result_ttl(self):
        """
        Ensure periodic jobs set result_ttl to infinite.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.result_ttl, -1)

    def test_periodic_jobs_sets_ttl(self):
        """
        Ensure periodic jobs set ttl correctly.
        """
        job = self.scheduler.schedule(datetime.utcnow(),
                                      say_hello,
                                      interval=5,
                                      ttl=4)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.ttl, 4)

    def test_periodic_jobs_sets_meta(self):
        """
        Ensure periodic jobs set meta correctly.
        """
        meta = {'say': 'hello'}
        job = self.scheduler.schedule(datetime.utcnow(),
                                      say_hello,
                                      interval=5,
                                      meta=meta)
        self.assertEqual(meta, job.meta)

    def test_periodic_job_sets_id(self):
        """
        Ensure that ID is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(),
                                      say_hello,
                                      interval=5,
                                      id='id test')
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('id test', job.id)

    def test_periodic_job_sets_description(self):
        """
        Ensure that description is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(),
                                      say_hello,
                                      interval=5,
                                      description='description')
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('description', job.description)

    def test_run(self):
        """
        Check correct signal handling in Scheduler.run().
        """
        def send_stop_signal():
            """
            Sleep for 1 second, then send an INT signal to ourselves, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)

        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, self.scheduler.run)
        thread.join()

    def test_run_burst(self):
        """
        Check burst mode of Scheduler.run().
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        self.assertEqual(len(list(self.scheduler.get_jobs())), 1)
        self.scheduler.run(burst=True)
        self.assertEqual(len(list(self.scheduler.get_jobs())), 0)

    def test_scheduler_w_o_explicit_connection(self):
        """
        Ensure instantiating Scheduler w/o explicit connection works.
        """
        s = Scheduler()
        self.assertEqual(s.connection, self.testconn)

    def test_small_float_interval(self):
        """
        Test that scheduler accepts 'interval' of type float, less than 1 second.
        """
        key = Scheduler.scheduler_key
        lock_key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn,
                              interval=0.1)  # testing interval = 0.1 second
        self.assertEqual(scheduler._interval, 0.1)

        #acquire lock
        self.assertTrue(scheduler.acquire_lock())
        self.assertIn(lock_key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(lock_key), 10)  # int(0.1) + 10 = 10

        #enqueue a job
        now = datetime.utcnow()
        job = scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        self.assertEqual(len(list(self.scheduler.get_jobs())), 1)

        #remove the lock
        scheduler.remove_lock()

        #test that run works with the small floating-point interval
        def send_stop_signal():
            """
            Sleep for 1 second, then send an INT signal to ourselves, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)

        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, scheduler.run)
        thread.join()

        #all jobs must have been scheduled during 1 second
        self.assertEqual(len(list(scheduler.get_jobs())), 0)

    def test_get_queue_for_job_with_job_queue_name(self):
        """
        Tests that scheduler gets the correct queue for the job when
        queue_name is provided.
        """
        queue = Queue('scheduler_foo', connection=self.testconn)
        job_queue = Queue('job_foo', connection=self.testconn)
        scheduler = Scheduler(connection=self.testconn, queue=queue)
        job = scheduler._create_job(say_hello, queue_name='job_foo')
        self.assertEqual(scheduler.get_queue_for_job(job), job_queue)

    def test_get_queue_for_job_without_job_queue_name(self):
        """
        Tests that scheduler gets the scheduler queue for the job
        when queue name is not provided for that job.
        """
        queue = Queue('scheduler_foo', connection=self.testconn)
        scheduler = Scheduler(connection=self.testconn, queue=queue)
        job = scheduler._create_job(say_hello)
        self.assertEqual(scheduler.get_queue_for_job(job), queue)
Example #18
class TestScheduler(RQTestCase):

    def setUp(self):
        super(TestScheduler, self).setUp()
        self.scheduler = Scheduler(connection=self.testconn)

    def test_birth_and_death_registration(self):
        """
        When the scheduler registers its birth, besides creating a key, it should
        also set an expiry that's a few seconds longer than its polling
        interval so it automatically expires if the scheduler is unexpectedly
        terminated.
        """
        key = Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=20)
        scheduler.register_birth()
        self.assertIn(key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(key), 30)
        self.assertFalse(self.testconn.hexists(key, 'death'))
        self.assertRaises(ValueError, scheduler.register_birth)
        scheduler.register_death()
        self.assertTrue(self.testconn.hexists(key, 'death'))

    def test_create_job(self):
        """
        Ensure that jobs are created properly.
        """
        job = self.scheduler._create_job(say_hello, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job, job_from_queue)
        self.assertEqual(job_from_queue.func, say_hello)

    def test_create_job_with_ttl(self):
        """
        Ensure that TTL is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, ttl=2, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(2, job_from_queue.ttl)

    def test_create_job_with_id(self):
        """
        Ensure that ID is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, id='id test', args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('id test', job_from_queue.id)

    def test_create_job_with_description(self):
        """
        Ensure that description is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, description='description', args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('description', job_from_queue.description)

    def test_job_not_persisted_if_commit_false(self):
        """
        Ensure jobs are only saved to Redis if commit=True.
        """
        job = self.scheduler._create_job(say_hello, commit=False)
        self.assertEqual(self.testconn.hgetall(job.key), {})

    def test_create_scheduled_job(self):
        """
        Ensure that scheduled jobs are put in the scheduler queue with the right score
        """
        scheduled_time = datetime.utcnow()
        job = self.scheduler.enqueue_at(scheduled_time, say_hello)
        self.assertEqual(job, Job.fetch(job.id, connection=self.testconn))
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(scheduled_time))

    def test_enqueue_in(self):
        """
        Ensure that jobs have the right scheduled time.
        """
        right_now = datetime.utcnow()
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertIn(job.id,
                      tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))
        time_delta = timedelta(hours=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))

    def test_get_jobs(self):
        """
        Ensure get_jobs() returns all jobs until the specified time.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs(now))
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertIn(job, self.scheduler.get_jobs(timedelta(hours=1, seconds=1)))
        self.assertIn(job, [j[0] for j in self.scheduler.get_jobs(with_times=True)])
        self.assertIsInstance(self.scheduler.get_jobs(with_times=True)[0][1], datetime)
        self.assertNotIn(job, self.scheduler.get_jobs(timedelta(minutes=59, seconds=59)))

    def test_get_jobs_to_queue(self):
        """
        Ensure that jobs scheduled in the future are not queued.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertNotIn(job, self.scheduler.get_jobs_to_queue())

    def test_enqueue_job(self):
        """
        When scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        - Queue is recognized by rq's Queue.all()
        """
        now = datetime.utcnow()
        queue_name = 'foo'
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)

        job = scheduler.enqueue_at(now, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job, tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10)))
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)
        self.assertIn(queue, Queue.all())

    def test_job_membership(self):
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler)
        self.assertIn(job.id, self.scheduler)
        self.assertNotIn("non-existing-job-id", self.scheduler)

    def test_cancel_scheduled_job(self):
        """
        When scheduled job is canceled, make sure:
        - Job is removed from the sorted set of scheduled jobs
        """
        # schedule a job to be enqueued one minute from now
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        # cancel the scheduled job and check that it's gone from the set
        self.scheduler.cancel(job)
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_change_execution_time(self):
        """
        Ensure that ``change_execution_time`` updates the job's score in the scheduled jobs set.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
        new_date = datetime(2010, 1, 1)
        self.scheduler.change_execution_time(job, new_date)
        self.assertEqual(to_unix(new_date),
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))
        self.scheduler.cancel(job)
        self.assertRaises(ValueError, self.scheduler.change_execution_time, job, new_date)

    def test_args_kwargs_are_passed_correctly(self):
        """
        Ensure that arguments and keyword arguments are properly saved to jobs.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_in(time_delta, simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

    def test_enqueue_is_deprecated(self):
        """
        Ensure .enqueue() throws a DeprecationWarning
        """
        with warnings.catch_warnings(record=True) as w:
            # Enable all warnings
            warnings.simplefilter("always")
            job = self.scheduler.enqueue(datetime.utcnow(), say_hello)
            self.assertEqual(1, len(w))
            self.assertEqual(w[0].category, DeprecationWarning)

    def test_enqueue_periodic(self):
        """
        Ensure .enqueue_periodic() throws a DeprecationWarning
        """
        with warnings.catch_warnings(record=True) as w:
            # Enable all warnings
            warnings.simplefilter("always")
            job = self.scheduler.enqueue_periodic(datetime.utcnow(), 1, None, say_hello)
            self.assertEqual(1, len(w))
            self.assertEqual(w[0].category, DeprecationWarning)

    def test_interval_and_repeat_persisted_correctly(self):
        """
        Ensure that interval and repeat attributes get correctly saved in Redis.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=10, repeat=11)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['interval'], 10)
        self.assertEqual(job_from_queue.meta['repeat'], 11)

    def test_crontab_persisted_correctly(self):
        """
        Ensure that crontab attribute gets correctly saved in Redis.
        """
        # create a job that runs one minute past each whole hour
        job = self.scheduler.cron("1 * * * *", say_hello)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['cron_string'], "1 * * * *")

        # get the scheduled_time and convert it to a datetime object
        unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)
        datetime_time = from_unix(unix_time)

        # check that minute=1, seconds=0, and is within an hour
        assert datetime_time.minute == 1
        assert datetime_time.second == 0
        assert datetime_time - datetime.utcnow() < timedelta(hours=1)

    def test_crontab_sets_timeout(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom timeout.
        """
        timeout = 13
        job = self.scheduler.cron("1 * * * *", say_hello, timeout=timeout)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.timeout, timeout)

    def test_crontab_sets_id(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom id
        """
        job_id = "hello-job-id"
        job = self.scheduler.cron("1 * * * *", say_hello, id=job_id)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_id, job_from_queue.id)

    def test_crontab_sets_default_result_ttl(self):
        """
        Ensure that a job scheduled via crontab gets the default
        result_ttl (-1) used for periodic tasks.
        """
        job = self.scheduler.cron("1 * * * *", say_hello)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(-1, job_from_queue.result_ttl)

    def test_crontab_sets_description(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom description
        """
        description = 'test description'
        job = self.scheduler.cron("1 * * * *", say_hello, description=description)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(description, job_from_queue.description)

    def test_repeat_without_interval_raises_error(self):
        # Ensure that an error is raised if repeat is specified without interval
        def create_job():
            self.scheduler.schedule(datetime.utcnow(), say_hello, repeat=11)
        self.assertRaises(ValueError, create_job)

    def test_job_with_intervals_get_rescheduled(self):
        """
        Ensure jobs with interval attribute are put back in the scheduler
        """
        time_now = datetime.utcnow()
        interval = 10
        job = self.scheduler.schedule(time_now, say_hello, interval=interval)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(time_now) + interval)

        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, None, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(time_now) + interval)

    def test_job_with_crontab_get_rescheduled(self):
        # Create a job with a cronjob_string
        job = self.scheduler.cron("1 * * * *", say_hello)

        # current unix_time
        old_next_scheduled_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)

        # change crontab
        job.meta['cron_string'] = "2 * * * *"

        # enqueue the job
        self.scheduler.enqueue_job(job)

        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # check that next scheduled time has changed
        self.assertNotEqual(old_next_scheduled_time,
                            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))

        # check that new next scheduled time is set correctly
        expected_next_scheduled_time = to_unix(get_next_scheduled_time("2 * * * *"))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         expected_next_scheduled_time)

    def test_job_with_repeat(self):
        """
        Ensure jobs with repeat attribute are put back in the scheduler
        X (repeat) number of times
        """
        time_now = datetime.utcnow()
        interval = 10
        # If job is repeated once, the job shouldn't be put back in the queue
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=1)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=2)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        time_now = datetime.utcnow()
        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, 1, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.enqueue_periodic(time_now, interval, 2, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_missing_jobs_removed_from_scheduler(self):
        """
        Ensure jobs that don't exist when queued are removed from the scheduler.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello)
        job.cancel()
        self.scheduler.get_jobs_to_queue()
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_periodic_jobs_sets_result_ttl(self):
        """
        Ensure periodic jobs set result_ttl to infinite.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.result_ttl, -1)

    def test_periodic_jobs_sets_ttl(self):
        """
        Ensure periodic jobs set ttl correctly.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, ttl=4)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.ttl, 4)

    def test_periodic_job_sets_id(self):
        """
        Ensure that ID is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, id='id test')
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('id test', job.id)

    def test_periodic_job_sets_description(self):
        """
        Ensure that description is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, description='description')
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('description', job.description)

    def test_run(self):
        """
        Check correct signal handling in Scheduler.run().
        """
        def send_stop_signal():
            """
            Sleep for 1 second, then send an INT signal to ourselves, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)
        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, self.scheduler.run)
        thread.join()

    def test_scheduler_w_o_explicit_connection(self):
        """
        Ensure instantiating Scheduler w/o explicit connection works.
        """
        s = Scheduler()
        self.assertEqual(s.connection, self.testconn)

    def test_small_float_interval(self):
        """
        Test that scheduler accepts 'interval' of type float, less than 1 second.
        """
        key = Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=0.1)   # testing interval = 0.1 second
        self.assertEqual(scheduler._interval, 0.1)

        #register birth
        scheduler.register_birth()
        self.assertIn(key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(key), 10)  # int(0.1) + 10 = 10
        self.assertFalse(self.testconn.hexists(key, 'death'))

        #enqueue a job
        now = datetime.utcnow()
        job = scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        self.assertEqual(len(self.scheduler.get_jobs()), 1)

        #register death
        scheduler.register_death()

        #test that run works with the small floating-point interval
        def send_stop_signal():
            """
            Sleep for 1 second, then send an INT signal to ourselves, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)
        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, scheduler.run)
        thread.join()

        #all jobs must have been scheduled during 1 second
        self.assertEqual(len(scheduler.get_jobs()), 0)
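
A compact sketch, outside the test class, of the flow the tests above exercise:
enqueue_at() stores the job in the scheduler's sorted set, get_jobs_to_queue() yields
the jobs whose scheduled time has passed, and enqueue_job() moves each one onto its RQ
queue (rescheduling it if it is periodic). The connection and dotted task path are
placeholders.

from datetime import datetime
from redis import Redis
from rq_scheduler import Scheduler

scheduler = Scheduler(connection=Redis())
scheduler.enqueue_at(datetime.utcnow(), 'myapp.tasks.say_hello')  # due immediately

# Move every job that is already due onto its queue.
for due_job in scheduler.get_jobs_to_queue():
    scheduler.enqueue_job(due_job)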
Exemple #19
0
class TestScheduler(RQTestCase):

    def setUp(self):
        super(TestScheduler, self).setUp()
        self.scheduler = Scheduler(connection=self.testconn)

    def test_acquire_lock(self):
        """
        When the scheduler acquires a lock, besides creating a key, it should
        also set an expiry that's a few seconds longer than its polling
        interval so it automatically expires if the scheduler is unexpectedly
        terminated.
        """
        key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=20)
        self.assertTrue(scheduler.acquire_lock())
        self.assertIn(key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(key), 30)
        scheduler.remove_lock()
        self.assertNotIn(key, tl(self.testconn.keys('*')))

    def test_no_two_schedulers_acquire_lock(self):
        """
        Ensure that no two schedulers can acquire the lock at the
        same time. When removing the lock, only the scheduler which
        originally acquired the lock can remove the lock.
        """
        key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler1 = Scheduler(connection=self.testconn, interval=20)
        scheduler2 = Scheduler(connection=self.testconn, interval=20)
        self.assertTrue(scheduler1.acquire_lock())
        self.assertFalse(scheduler2.acquire_lock())
        self.assertIn(key, tl(self.testconn.keys('*')))
        scheduler2.remove_lock()
        self.assertIn(key, tl(self.testconn.keys('*')))
        scheduler1.remove_lock()
        self.assertNotIn(key, tl(self.testconn.keys('*')))

    def test_create_job(self):
        """
        Ensure that jobs are created properly.
        """
        job = self.scheduler._create_job(say_hello, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job, job_from_queue)
        self.assertEqual(job_from_queue.func, say_hello)

    def test_create_job_with_ttl(self):
        """
        Ensure that TTL is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, ttl=2, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(2, job_from_queue.ttl)

    def test_create_job_with_id(self):
        """
        Ensure that ID is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, id='id test', args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('id test', job_from_queue.id)

    def test_create_job_with_description(self):
        """
        Ensure that description is passed to RQ.
        """
        job = self.scheduler._create_job(say_hello, description='description', args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('description', job_from_queue.description)

    def test_create_job_with_timeout(self):
        """
        Ensure that timeout is passed to RQ.
        """
        timeout = 13
        job = self.scheduler._create_job(say_hello, timeout=timeout, args=(), kwargs={})
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(timeout, job_from_queue.timeout)

    def test_job_not_persisted_if_commit_false(self):
        """
        Ensure jobs are only saved to Redis if commit=True.
        """
        job = self.scheduler._create_job(say_hello, commit=False)
        self.assertEqual(self.testconn.hgetall(job.key), {})

    def test_create_scheduled_job(self):
        """
        Ensure that scheduled jobs are put in the scheduler queue with the right score
        """
        scheduled_time = datetime.utcnow()
        job = self.scheduler.enqueue_at(scheduled_time, say_hello)
        self.assertEqual(job, Job.fetch(job.id, connection=self.testconn))
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(scheduled_time))

    def test_create_job_with_meta(self):
        """
        Ensure that meta information on the job is passed to rq
        """
        expected = {'say': 'hello'}
        job = self.scheduler._create_job(say_hello, meta=expected)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(expected, job_from_queue.meta)

    def test_enqueue_at_sets_timeout(self):
        """
        Ensure that a job scheduled via enqueue_at can be created with
        a custom timeout.
        """
        timeout = 13
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, timeout=timeout)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.timeout, timeout)

    def test_enqueue_at_sets_job_id(self):
        """
        Ensure that a job scheduled via enqueue_at can be created with
        a custom job id.
        """
        job_id = 'test_id'
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, job_id=job_id)
        self.assertEqual(job.id, job_id)

    def test_enqueue_at_sets_job_ttl(self):
        """
        Ensure that a job scheduled via enqueue_at can be created with a custom job ttl.
        """
        job_ttl = 123456789
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, job_ttl=job_ttl)
        self.assertEqual(job.ttl, job_ttl)

    def test_enqueue_at_sets_job_result_ttl(self):
        """
        Ensure that a job scheduled via enqueue_at can be created with a custom result ttl.
        """
        job_result_ttl = 1234567890
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, job_result_ttl=job_result_ttl)
        self.assertEqual(job.result_ttl, job_result_ttl)

    def test_enqueue_at_sets_meta(self):
        """
        Ensure that a job scheduled via enqueue_at can be created with a custom meta.
        """
        meta = {'say': 'hello'}
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, meta=meta)
        self.assertEqual(job.meta, meta)

    def test_enqueue_in(self):
        """
        Ensure that jobs have the right scheduled time.
        """
        right_now = datetime.utcnow()
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertIn(job.id,
                      tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))
        time_delta = timedelta(hours=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(right_now + time_delta))

    def test_enqueue_in_sets_timeout(self):
        """
        Ensure that a job scheduled via enqueue_in can be created with
        a custom timeout.
        """
        timeout = 13
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, timeout=timeout)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.timeout, timeout)

    def test_enqueue_in_sets_job_id(self):
        """
        Ensure that a job scheduled via enqueue_in can be created with
        a custom job id.
        """
        job_id = 'test_id'
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, job_id=job_id)
        self.assertEqual(job.id, job_id)

    def test_enqueue_in_sets_job_ttl(self):
        """
        Ensure that a job scheduled via enqueue_in can be created with a custom job ttl.
        """
        job_ttl = 123456789
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, job_ttl=job_ttl)
        self.assertEqual(job.ttl, job_ttl)

    def test_enqueue_in_sets_job_result_ttl(self):
        """
        Ensure that a job scheduled via enqueue_in can be created with a custom result ttl.
        """
        job_result_ttl = 1234567890
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, job_result_ttl=job_result_ttl)
        self.assertEqual(job.result_ttl, job_result_ttl)

    def test_enqueue_in_sets_meta(self):
        """
        Ensure that a job scheduled via enqueue_in sets meta.
        """
        meta = {'say': 'hello'}
        job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, meta=meta)
        self.assertEqual(job.meta, meta)

    def test_count(self):
        now = datetime.utcnow()
        self.scheduler.enqueue_at(now, say_hello)
        self.assertEqual(self.scheduler.count(), 1)

        future_time = now + timedelta(hours=1)
        future_test_time = now + timedelta(minutes=59, seconds=59)

        self.scheduler.enqueue_at(future_time, say_hello)

        self.assertEqual(self.scheduler.count(timedelta(minutes=59, seconds=59)), 1)
        self.assertEqual(self.scheduler.count(future_test_time), 1)
        self.assertEqual(self.scheduler.count(), 2)

    def test_get_jobs(self):
        """
        Ensure get_jobs() returns all jobs until the specified time.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs(now))
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertIn(job, self.scheduler.get_jobs(timedelta(hours=1, seconds=1)))
        self.assertIn(job, [j[0] for j in self.scheduler.get_jobs(with_times=True)])
        self.assertIsInstance(list(self.scheduler.get_jobs(with_times=True))[0][1], datetime)
        self.assertNotIn(job, self.scheduler.get_jobs(timedelta(minutes=59, seconds=59)))

    def test_get_jobs_slice(self):
        """
        Ensure get_jobs() returns the appropriate slice of all jobs using offset and length.
        """
        now = datetime.utcnow()
        future_time = now + timedelta(hours=1)
        future_test_time = now + timedelta(minutes=59, seconds=59)

        # Schedule each job a second later than the previous job,
        # otherwise Redis will return jobs that have the same scheduled time in
        # lexicographical order (not the order in which we enqueued them)
        now_jobs = [self.scheduler.enqueue_at(now + timedelta(seconds=x), say_hello)
                    for x in range(15)]
        future_jobs = [self.scheduler.enqueue_at(future_time + timedelta(seconds=x), say_hello)
                       for x in range(15)]

        expected_slice = now_jobs[5:] + future_jobs[:10]   # last 10 from now_jobs and first 10 from future_jobs
        expected_until_slice = now_jobs[5:]                # last 10 from now_jobs

        jobs = self.scheduler.get_jobs()
        jobs_slice = self.scheduler.get_jobs(offset=5, length=20)
        jobs_until_slice = self.scheduler.get_jobs(future_test_time, offset=5, length=20)

        self.assertEqual(now_jobs + future_jobs, list(jobs))
        self.assertEqual(expected_slice, list(jobs_slice))
        self.assertEqual(expected_until_slice, list(jobs_until_slice))

    def test_get_jobs_to_queue(self):
        """
        Ensure that jobs scheduled in the future are not queued.
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        future_time = now + timedelta(hours=1)
        job = self.scheduler.enqueue_at(future_time, say_hello)
        self.assertNotIn(job, self.scheduler.get_jobs_to_queue())

    def test_enqueue_job(self):
        """
        When scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        - Queue is recognized by rq's Queue.all()
        """
        now = datetime.utcnow()
        queue_name = 'foo'
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)

        job = scheduler.enqueue_at(now, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job, tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10)))
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)
        self.assertIn(queue, Queue.all())

    def test_enqueue_job_with_queue(self):
        """
        Ensure that job is enqueued correctly when the scheduler is bound
        to a queue object.
        """
        queue = Queue('foo', connection=self.testconn)
        scheduler = Scheduler(connection=self.testconn, queue=queue)
        job = scheduler._create_job(say_hello)
        scheduler_queue = scheduler.get_queue_for_job(job)
        self.assertEqual(queue, scheduler_queue)
        scheduler.enqueue_job(job)
        self.assertTrue(job.enqueued_at is not None)
        self.assertIn(job, queue.jobs)
        self.assertIn(queue, Queue.all())

    def test_job_membership(self):
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler)
        self.assertIn(job.id, self.scheduler)
        self.assertNotIn("non-existing-job-id", self.scheduler)

    def test_cancel_scheduled_job(self):
        """
        When scheduled job is canceled, make sure:
        - Job is removed from the sorted set of scheduled jobs
        """
        # schedule a job to be enqueued one minute from now
        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, say_hello)
        # cancel the scheduled job and check that it's gone from the set
        self.scheduler.cancel(job)
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_change_execution_time(self):
        """
        Ensure that ``change_execution_time`` updates the job's score in the scheduled jobs set.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
        new_date = datetime(2010, 1, 1)
        self.scheduler.change_execution_time(job, new_date)
        self.assertEqual(to_unix(new_date),
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))
        self.scheduler.cancel(job)
        self.assertRaises(ValueError, self.scheduler.change_execution_time, job, new_date)

    def test_args_kwargs_are_passed_correctly(self):
        """
        Ensure that arguments and keyword arguments are properly saved to jobs.
        """
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

        time_delta = timedelta(minutes=1)
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, 1, 1)
        self.assertEqual(job.args, (1, 1, 1))
        job = self.scheduler.enqueue_in(time_delta, simple_addition, z=1, y=1, x=1)
        self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
        job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, z=1, y=1)
        self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
        self.assertEqual(job.args, (1,))

    def test_interval_and_repeat_persisted_correctly(self):
        """
        Ensure that interval and repeat attributes are correctly saved.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=10, repeat=11)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['interval'], 10)
        self.assertEqual(job_from_queue.meta['repeat'], 11)

    def test_crontab_persisted_correctly(self):
        """
        Ensure that crontab attribute gets correctly saved in Redis.
        """
        # create a job that runs one minute past each whole hour
        job = self.scheduler.cron("1 * * * *", say_hello)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['cron_string'], "1 * * * *")

        # get the scheduled_time and convert it to a datetime object
        unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)
        datetime_time = from_unix(unix_time)

        # check that minute=1, seconds=0, and is within an hour
        assert datetime_time.minute == 1
        assert datetime_time.second == 0
        assert datetime_time - datetime.utcnow() < timedelta(hours=1)

    def test_crontab_sets_timeout(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom timeout.
        """
        timeout = 13
        job = self.scheduler.cron("1 * * * *", say_hello, timeout=timeout)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.timeout, timeout)

    def test_crontab_sets_id(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom id
        """
        job_id = "hello-job-id"
        job = self.scheduler.cron("1 * * * *", say_hello, id=job_id)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_id, job_from_queue.id)

    def test_crontab_sets_default_result_ttl(self):
        """
        Ensure that a job scheduled via crontab gets the default
        result_ttl (-1) used for periodic tasks.
        """
        job = self.scheduler.cron("1 * * * *", say_hello)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(-1, job_from_queue.result_ttl)

    def test_crontab_sets_description(self):
        """
        Ensure that a job scheduled via crontab can be created with
        a custom description
        """
        description = 'test description'
        job = self.scheduler.cron("1 * * * *", say_hello, description=description)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(description, job_from_queue.description)

    def test_repeat_without_interval_raises_error(self):
        # Ensure that an error is raised if repeat is specified without interval
        def create_job():
            self.scheduler.schedule(datetime.utcnow(), say_hello, repeat=11)
        self.assertRaises(ValueError, create_job)

    def test_job_with_intervals_get_rescheduled(self):
        """
        Ensure jobs with interval attribute are put back in the scheduler
        """
        time_now = datetime.utcnow()
        interval = 10
        job = self.scheduler.schedule(time_now, say_hello, interval=interval)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         to_unix(time_now) + interval)

    def test_job_with_interval_can_set_meta(self):
        """
        Ensure that jobs with interval attribute can be created with meta
        """
        time_now = datetime.utcnow()
        interval = 10
        meta = {'say': 'hello'}
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, meta=meta)
        self.scheduler.enqueue_job(job)
        self.assertEqual(job.meta, meta)

    def test_job_with_crontab_get_rescheduled(self):
        # Create a job with a cronjob_string
        job = self.scheduler.cron("1 * * * *", say_hello)

        # current unix_time
        old_next_scheduled_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)

        # change crontab
        job.meta['cron_string'] = "2 * * * *"

        # enqueue the job
        self.scheduler.enqueue_job(job)

        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # check that next scheduled time has changed
        self.assertNotEqual(old_next_scheduled_time,
                            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))

        # check that new next scheduled time is set correctly
        expected_next_scheduled_time = to_unix(get_next_scheduled_time("2 * * * *"))
        self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
                         expected_next_scheduled_time)

    def test_job_with_repeat(self):
        """
        Ensure jobs with repeat attribute are put back in the scheduler
        X (repeat) number of times
        """
        time_now = datetime.utcnow()
        interval = 10
        # If job is repeated once, the job shouldn't be put back in the queue
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=1)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=2)
        self.scheduler.enqueue_job(job)
        self.assertIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(job.id,
            tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_missing_jobs_removed_from_scheduler(self):
        """
        Ensure jobs that don't exist when queued are removed from the scheduler.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello)
        job.cancel()
        list(self.scheduler.get_jobs_to_queue())
        self.assertIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))
        job.delete()
        list(self.scheduler.get_jobs_to_queue())
        self.assertNotIn(job.id, tl(self.testconn.zrange(
            self.scheduler.scheduled_jobs_key, 0, 1)))

    def test_periodic_jobs_sets_result_ttl(self):
        """
        Ensure periodic jobs set result_ttl to infinite.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.result_ttl, -1)

    def test_periodic_jobs_sets_ttl(self):
        """
        Ensure periodic jobs set ttl correctly.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, ttl=4)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.ttl, 4)

    def test_periodic_jobs_sets_meta(self):
        """
        Ensure periodic jobs set meta correctly.
        """
        meta = {'say': 'hello'}
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, meta=meta)
        self.assertEqual(meta, job.meta)

    def test_periodic_job_sets_id(self):
        """
        Ensure that ID is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, id='id test')
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('id test', job.id)

    def test_periodic_job_sets_description(self):
        """
        Ensure that description is passed to RQ by schedule.
        """
        job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, description='description')
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual('description', job.description)

    def test_run(self):
        """
        Check correct signal handling in Scheduler.run().
        """
        def send_stop_signal():
            """
            Sleep for 1 second, then send an INT signal to ourselves, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)
        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, self.scheduler.run)
        thread.join()

    def test_run_burst(self):
        """
        Check burst mode of Scheduler.run().
        """
        now = datetime.utcnow()
        job = self.scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        self.assertEqual(len(list(self.scheduler.get_jobs())), 1)
        self.scheduler.run(burst=True)
        self.assertEqual(len(list(self.scheduler.get_jobs())), 0)

    def test_scheduler_w_o_explicit_connection(self):
        """
        Ensure instantiating Scheduler w/o explicit connection works.
        """
        s = Scheduler()
        self.assertEqual(s.connection, self.testconn)

    def test_small_float_interval(self):
        """
        Test that scheduler accepts 'interval' of type float, less than 1 second.
        """
        key = Scheduler.scheduler_key
        lock_key = '%s_lock' % Scheduler.scheduler_key
        self.assertNotIn(key, tl(self.testconn.keys('*')))
        scheduler = Scheduler(connection=self.testconn, interval=0.1)   # testing interval = 0.1 second
        self.assertEqual(scheduler._interval, 0.1)

        #acquire lock
        self.assertTrue(scheduler.acquire_lock())
        self.assertIn(lock_key, tl(self.testconn.keys('*')))
        self.assertEqual(self.testconn.ttl(lock_key), 10)  # int(0.1) + 10 = 10

        #enqueue a job
        now = datetime.utcnow()
        job = scheduler.enqueue_at(now, say_hello)
        self.assertIn(job, self.scheduler.get_jobs_to_queue())
        self.assertEqual(len(list(self.scheduler.get_jobs())), 1)

        #remove the lock
        scheduler.remove_lock()

        #test that run works with the small floating-point interval
        def send_stop_signal():
            """
            Sleep for 1 second, then send an INT signal to ourselves, so the
            signal handler installed by scheduler.run() is called.
            """
            time.sleep(1)
            os.kill(os.getpid(), signal.SIGINT)
        thread = Thread(target=send_stop_signal)
        thread.start()
        self.assertRaises(SystemExit, scheduler.run)
        thread.join()

        #all jobs must have been scheduled during 1 second
        self.assertEqual(len(list(scheduler.get_jobs())), 0)
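
test_run_burst above relies on run(burst=True), which enqueues everything that is
already due and then returns instead of polling forever. A minimal sketch of that mode,
with the connection and dotted task path as placeholders:

from datetime import datetime
from redis import Redis
from rq_scheduler import Scheduler

scheduler = Scheduler(connection=Redis())
scheduler.enqueue_at(datetime.utcnow(), 'myapp.tasks.say_hello')
scheduler.run(burst=True)  # drain the currently due jobs once, then exit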
Exemple #20
0
from django.core.management import call_command
from django_rq import job, get_connection
from rq_scheduler import Scheduler
from datetime import timedelta

SCHEDULER = Scheduler(connection=get_connection('default'))


@job
def update_search_index():
    call_command('update_index')


@job
def update_catalog_records():
    call_command('detect_dataset_sync_strategy')
    call_command('sync_catalog_record')
    call_command('map_story_dataset_urls')


SCHEDULER.cron(
    "0 1 * * *",
    func=update_search_index,
    repeat=None,  #forever
)

SCHEDULER.cron(
    "0 0 * * *",
    func=update_catalog_records,
    repeat=None,  #forever
)
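
Calling SCHEDULER.cron() at import time only writes the two entries to Redis; a separate
scheduler process still has to poll and enqueue them, and an RQ worker has to execute the
enqueued jobs. A minimal sketch of such a scheduler process, assuming the same django_rq
connection helper is available:

from django_rq import get_connection
from rq_scheduler import Scheduler

if __name__ == '__main__':
    # Poll every 60 seconds and enqueue whichever cron jobs are due.
    Scheduler(connection=get_connection('default'), interval=60).run()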
Exemple #21
0
from rq_scheduler import Scheduler

from helpers import rq_store
from jobs import upload_all

TEST_WORKER_INTERVAL = 60

if __name__ == '__main__':
    scheduler = Scheduler(connection=rq_store, interval=60.0)

    list_of_job_instances = scheduler.get_jobs()
    for job in list_of_job_instances:
        scheduler.cancel(job)

    scheduler.cron(cron_string='0 0 * * *',
                   func=upload_all,
                   args=[],
                   kwargs={},
                   repeat=None)

    scheduler.run()
Exemple #22
0
from redis import Redis
from rq import Queue
from rq_scheduler import Scheduler


from socialgroupmain.scheduler_tasks.del_inactive_users import inactive_users
from socialgroupmain.scheduler_tasks.daily_feed import dailyfeed

scheduler = Scheduler(connection=Redis())

# cron_string = "* * * * *"       # every minute
#
# scheduler.cron(
#     cron_string,
#     func=dailyfeed,  # Function to be queued
#     repeat=1,  # Repeat this number of times (None means repeat forever)
#     queue_name='test',  # In which queue the job should be put in
#     use_local_timezone=True  # Interpret hours in the local timezone
# )


cron_string = "* * * * *"       # every minute

scheduler.cron(
    cron_string,
    func=inactive_users,  # Function to be queued
    repeat=0,  # Repeat this number of times (None means repeat forever)
    queue_name='test',  # In which queue the job should be put in
    use_local_timezone=True  # Interpret hours in the local timezone
)
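
Because scheduler.cron() registers a new scheduled job every time this module is executed,
re-running the snippet above leaves duplicate entries behind. A self-contained sketch of
the guard Exemple #21 applies, cancelling existing jobs before registering the cron entry
again; the connection is a placeholder:

from redis import Redis
from rq_scheduler import Scheduler

scheduler = Scheduler(connection=Redis())

# Cancel previously registered jobs so a re-run does not accumulate duplicates.
for existing_job in scheduler.get_jobs():
    scheduler.cancel(existing_job)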
Exemple #23
0
from redis import Redis
from rq_scheduler import Scheduler
from scheduler_tasks.delete_inactive import inactive_users
from scheduler_tasks.feed import dailyfeed

scheduler = Scheduler(connection=Redis())

cron_string = "* * * * *"  # every minute

scheduler.cron(
    cron_string,
    func=dailyfeed,  # Function to be queued
    repeat=1,  # Repeat this number of times (None means repeat forever)
    queue_name='test',  # In which queue the job should be put in
    use_local_timezone=True  # Interpret hours in the local timezone
)
'''cron_string = "* * * * *"       # every minute

scheduler.cron(
    cron_string,
    func=inactive_users,  # Function to be queued
    repeat=0,  # Repeat this number of times (None means repeat forever)
    queue_name='test',  # In which queue the job should be put in
    use_local_timezone=True  # Interpret hours in the local timezone
)'''
Exemple #24
0
    local = pytz.timezone("Asia/Hong_Kong")
    local_dt = local.localize(t, is_dst=None)
    return(local_dt.astimezone(pytz.utc))


conn = StrictRedis(host='localhost',port=6379)
scheduler = Scheduler(connection=conn)

hk_yday = datetime.now() - timedelta(days=1)
hk_today = datetime.now()

# Update all stock list at local time '0 7 * * 1-6'
scheduler.cron(
    '0 23 * * 0-5',
    func=update_stock,
    args=[],
    kwargs={},
    repeat=None,
    queue_name='stock')

# Update ccass player list at local time '0 4 * * 1-6'
scheduler.cron(
    '0 20 * * 0-5',
    func=update_ccassplayer,
    args=[],
    kwargs={},
    repeat=None,
    queue_name='ccass_player')

# Update sbstock list at local time '5 4 * * 1-6'
scheduler.cron(