# Example #1
# 0
    def testSetUp(self):
        """Per-test setup: point the Celery app at the current test database.

        NOTE(review): assumes ``self[...]`` is a test-layer resource mapping
        (zope.testrunner layer style) — confirm against the enclosing class.
        """
        # Switch database to the currently active DemoStorage,
        # see zeit.cms.testing.WSGILayer.testSetUp().
        self['celery_app'].conf['ZODB'] = self['functional_setup'].db

        # Re-initialize the scheduler backend in place so it picks up the
        # freshly configured ZODB above (presumably the backend reads the
        # app config lazily on __init__ — verify against its implementation).
        celery_longterm_scheduler.get_scheduler(
            self['celery_app']).backend.__init__(None, None)
# Example #2
# 0
    def cancel_job(self, job_id):
        """Revoke a scheduled job.

        Returns False when ``job_id`` is empty/None, otherwise the result
        of the scheduler's revoke() call.
        """
        import celery_longterm_scheduler  # UI-only dependency

        if job_id:
            scheduler = celery_longterm_scheduler.get_scheduler(
                zeit.cms.celery.CELERY)
            return scheduler.revoke(job_id)
        return False
def test_revoke_deletes_from_storage():
    """Revoking a scheduled task also removes it from the backend storage."""
    due = PAST_DATE
    # Renamed from ``id``, which shadowed the builtin of the same name.
    job_id = echo.apply_async(('foo',), eta=due).id
    scheduler = celery_longterm_scheduler.get_scheduler(CELERY)
    pending = list(scheduler.backend.get_older_than(due))
    assert pending

    # revoke() reports success and the task must no longer be pending.
    assert scheduler.revoke(job_id)
    pending = list(scheduler.backend.get_older_than(due))
    assert not pending
def test_execute_pending_runs_scheduled_tasks(celery_worker):
    """A task whose eta lies in the past is run by execute_pending()."""
    # BUG FIX: the original used ``record_calls[:] == []`` — a comparison,
    # i.e. a no-op — instead of slice assignment, so the shared list was
    # never actually cleared between tests.
    record_calls[:] = []
    due = PAST_DATE
    record.apply_async(('foo',), eta=due)
    assert not record_calls
    scheduler = celery_longterm_scheduler.get_scheduler(CELERY)
    # Try to execute task, scheduled in the past
    scheduler.execute_pending(pendulum.now())
    # XXX I don't think there is any "job completed" signal we could wait for.
    time.sleep(1)
    assert record_calls == ['foo']
def test_execute_pending_deletes_from_storage():
    """After execution, a due task is removed from the scheduler storage."""
    due = PAST_DATE
    echo.apply_async(('foo',), eta=due)

    scheduler = celery_longterm_scheduler.get_scheduler(CELERY)
    assert list(scheduler.backend.get_older_than(due))

    # Stub out the actual dispatch; this test only cares about storage cleanup.
    with mock.patch.object(CELERY, 'send_task'):
        scheduler.execute_pending(due)
    assert not list(scheduler.backend.get_older_than(due))
def test_execute_pending_does_not_run_tasks_scheduled_in_the_future(
        celery_worker):
    """A task with a future eta must not be run by execute_pending()."""
    # BUG FIX: the original used ``record_calls[:] == []`` — a comparison,
    # i.e. a no-op — instead of slice assignment, so leftover entries from
    # other tests were never cleared.
    record_calls[:] = []
    due = FUTURE_DATE
    record.apply_async(('not_yet',), eta=due)
    assert not record_calls
    scheduler = celery_longterm_scheduler.get_scheduler(CELERY)
    # Try to execute task, scheduled in the future
    scheduler.execute_pending(pendulum.now())
    # XXX I don't think there is any "job completed" signal we could wait for.
    time.sleep(1)
    assert not record_calls
def test_should_store_all_arguments_needed_for_send_task(celery_worker):
    """The scheduler must store every send_task() argument verbatim, so that
    replaying a stored task produces the exact same send_task() call as a
    normal, unscheduled apply_async() would.
    """
    # Cannot do this with a Mock, since they (technically correctly)
    # differentiate recording calls between args and kw, so a call
    # `send_task(1, 2,  3)` is not considered equal to
    # `send_task(1, args=2, kwargs=3)`, although semantically it is the same.
    # record_task mirrors the full send_task() signature and normalizes
    # positional vs. keyword invocation by folding everything into one dict.
    # NOTE(review): this signature must be kept in sync with the Celery
    # version in use — confirm against Celery.send_task.
    def record_task(
            name, args=None, kwargs=None, countdown=None, eta=None,
            task_id=None, producer=None, connection=None, router=None,
            result_cls=None, expires=None, publisher=None, link=None,
            link_error=None, add_to_parent=True, group_id=None, retries=0,
            chord=None, reply_to=None, time_limit=None, soft_time_limit=None,
            root_id=None, parent_id=None, route_name=None, shadow=None,
            chain=None, task_type=None, **options):
        options.update(dict(
            args=args, kwargs=kwargs, countdown=countdown,
            eta=eta, task_id=task_id, producer=producer, connection=connection,
            router=router, result_cls=result_cls, expires=expires,
            publisher=publisher, link=link, link_error=link_error,
            add_to_parent=add_to_parent, group_id=group_id, retries=retries,
            chord=chord, reply_to=reply_to, time_limit=time_limit,
            soft_time_limit=soft_time_limit, root_id=root_id,
            parent_id=parent_id, route_name=route_name, shadow=shadow,
            chain=chain, task_type=task_type
        ))
        calls.append((name, options))
    # Collected (name, normalized-kwargs) tuples, one per send_task() call.
    calls = []

    with mock.patch.object(CELERY, 'send_task', new=record_task):
        result = echo.apply_async(('foo',), eta=pendulum.now())
        # Replay the stored task exactly as execute_pending() would.
        task = get_scheduler(CELERY).backend.get(result.id)
        args = task[0]
        kw = task[1]
        # schedule() always generates an ID itself (to reuse it for the
        # scheduler storage), while the normal apply_async() defers that to
        # send_task(). We undo this here for comparison purposes.
        kw['task_id'] = None
        CELERY.send_task(*args, **kw)
        scheduled_call = calls[0]

        echo.apply_async(('foo',))
        normal_call = calls[1]
        # Special edge case, see Task._schedule() for an explanation
        normal_call[1]['result_cls'] = None
        assert scheduled_call == normal_call
# Example #8
# 0
    def test_released_to__in_future_is_retracted_later(self):
        """Changing ``released_to`` revokes the previously scheduled retract
        job, schedules a replacement, and logs each step on the content.
        """
        # Publish first so there is a state to retract from.
        zeit.cms.workflow.interfaces.IPublish(
            self.content).publish(background=False)
        transaction.commit()

        retract_on = datetime.now(pytz.UTC) + timedelta(seconds=1.5)
        self.workflow.release_period = (None, retract_on)
        transaction.commit()
        cancel_retract_job_id = self.workflow.retract_job_id
        scheduler = celery_longterm_scheduler.get_scheduler(
            self.layer['celery_app'])
        # The retract job was stored with the scheduler backend.
        assert scheduler.backend.get(cancel_retract_job_id)

        # The current job gets revoked on change of released_to:
        new_retract_on = retract_on + timedelta(seconds=1)
        self.workflow.release_period = (None, new_retract_on)
        transaction.commit()
        # Revoked jobs are deleted from storage; lookup raises KeyError.
        with self.assertRaises(KeyError):
            scheduler.backend.get(cancel_retract_job_id)

        # The newly created job is pending its execution:
        new_job = self.workflow.retract_job_id
        assert scheduler.backend.get(new_job)

        # The actions are logged:
        log = zope.component.getUtility(zeit.objectlog.interfaces.IObjectLog)
        log_entries = [zope.i18n.translate(e.message)
                       for e in log.get_log(self.content)]

        # Render timestamps the same way the workflow log does.
        def berlin(dt):
            return TimeBasedWorkflow.format_datetime(dt)

        self.assertEqual([
            u'Urgent: yes',
            u'Published',
            u'To retract on {} (job #{})'.format(
                berlin(retract_on), cancel_retract_job_id),
            u'Scheduled retract cancelled (job #{}).'.format(
                cancel_retract_job_id),
            u'To retract on {} (job #{})'.format(
                berlin(new_retract_on), self.workflow.retract_job_id),
        ], log_entries)
    def _schedule(self, timestamp, **kw):
        """Persist this task invocation for execution at ``timestamp``.

        ``kw`` carries the parameters apply_async() passes on to
        app.send_task(), plus our own additions, so the call can be
        replayed verbatim later on.
        """
        kw['task_type'] = self
        # result_cls is deliberately left out: serializing instancemethods
        # is a pain, and inside send_task the extra settings of
        # self.AsyncResult (compared to app.AsyncResult) make no difference,
        # so it is not actually needed there. For the return value of
        # apply_async we call self.AsyncResult ourselves below anyway.
        # kw['result_cls'] = self.AsyncResult

        # Reuse the celery task_id as the key in our scheduler storage; this
        # is mostly for integration purposes, e.g. so other Task subclasses
        # can control the task_id themselves and still work when they
        # inherit from us.
        task_id = kw.get('task_id') or celery.utils.gen_unique_id()
        kw['task_id'] = task_id

        celery_longterm_scheduler.get_scheduler(self.app).store(
            timestamp, task_id, (self.name,), kw)
        return self.AsyncResult(task_id)
# Example #10
# 0
    def test_released_from__revokes_job_on_change(self):
        """Changing ``released_from`` revokes the old publish job and
        schedules a new one in its place.
        """
        publish_on = datetime.now(pytz.UTC) + timedelta(days=1)

        self.workflow.release_period = (publish_on, None)
        transaction.commit()
        job_id = self.workflow.publish_job_id
        scheduler = celery_longterm_scheduler.get_scheduler(
            self.layer['celery_app'])
        # The publish job was stored with the scheduler backend.
        assert scheduler.backend.get(job_id)

        # The current job gets revoked on change of released_from:
        publish_on += timedelta(seconds=1)
        self.workflow.release_period = (publish_on, None)
        transaction.commit()
        # Revoked jobs are deleted from storage; lookup raises KeyError.
        with self.assertRaises(KeyError):
            scheduler.backend.get(job_id)

        # The newly created job is pending its execution:
        new_job = self.workflow.publish_job_id
        assert scheduler.backend.get(new_job)
# Example #11
# 0
    def test_released_from__in_future_is_published_later(self):
        """A publish scheduled slightly in the future is executed by
        execute_pending() and its progress appears in the worker log.
        """
        publish_on = datetime.now(pytz.UTC) + timedelta(seconds=1.2)

        self.workflow.release_period = (publish_on, None)
        transaction.commit()

        scheduler = celery_longterm_scheduler.get_scheduler(
            self.layer['celery_app'])
        # Trigger everything due up to (and including) the scheduled time.
        scheduler.execute_pending(publish_on)
        transaction.commit()

        result = celery.result.AsyncResult(self.workflow.publish_job_id)
        assert 'Published.' == result.get()
        # The template below is matched ellipsis-style against the captured
        # log output; it must stay byte-identical to the expected messages.
        self.assertEllipsis("""\
Start executing tasks...
Enqueuing...
Revoked...
End executing tasks...
Running job {0.workflow.publish_job_id} for http://xml.zeit.de/online/2007/01/Somalia
Publishing http://xml.zeit.de/online/2007/01/Somalia
...
Done http://xml.zeit.de/online/2007/01/Somalia ...""".format(self),  # noqa
                            self.log.getvalue())
def test_revoke_returns_false_for_nonexistent_id():
    """revoke() signals failure with False for an unknown job id."""
    revoked = celery_longterm_scheduler.get_scheduler(CELERY).revoke(
        'nonexistent')
    assert revoked is False
# Example #13
# 0
 def cancel_job(self, job_id):
     """Revoke a scheduled job via the longterm scheduler.

     Returns False when ``job_id`` is falsy; otherwise the result of the
     scheduler's revoke() call.
     """
     if not job_id:
         return False
     return celery_longterm_scheduler.get_scheduler(
         zeit.cms.celery.CELERY).revoke(job_id)