Example #1
    def test_add_celery_canvas(self, some_sigs):
        wf = Workflow()
        c1 = some_sigs[0] | some_sigs[1]
        wf.add_celery_chain(c1)

        assert some_sigs[0].id in wf.nodes
        assert some_sigs[1].id in wf.nodes
Example #2
    def test_add_chain_of_chords(self, some_sigs):
        wf = Workflow()
        sigs = some_sigs
        c1 = celery_canvas.chord(sigs[:4], sigs[4])
        c2 = celery_canvas.chord(sigs[5:9], sigs[9])
        canvas = celery_canvas.chain([c1, c2])

        res = wf.add_celery_canvas(canvas)

        assert res == [wf.nodes[sigs[9].id]]

        # use build_dependency_asserts(wf) to regenerate
        # build_dependency_asserts(wf)

        assert wf.nodes["task-0"].dependencies == {}  # noqa
        assert wf.nodes["task-1"].dependencies == {}  # noqa
        assert wf.nodes["task-2"].dependencies == {}  # noqa
        assert wf.nodes["task-3"].dependencies == {}  # noqa
        assert "task-0" in wf.nodes["task-4"].dependencies  # noqa
        assert "task-1" in wf.nodes["task-4"].dependencies  # noqa
        assert "task-2" in wf.nodes["task-4"].dependencies  # noqa
        assert "task-3" in wf.nodes["task-4"].dependencies  # noqa
        assert "task-4" in wf.nodes["task-5"].dependencies  # noqa
        assert "task-4" in wf.nodes["task-6"].dependencies  # noqa
        assert "task-4" in wf.nodes["task-7"].dependencies  # noqa
        assert "task-4" in wf.nodes["task-8"].dependencies  # noqa
        assert "task-5" in wf.nodes["task-9"].dependencies  # noqa
        assert "task-6" in wf.nodes["task-9"].dependencies  # noqa
        assert "task-7" in wf.nodes["task-9"].dependencies  # noqa
        assert "task-8" in wf.nodes["task-9"].dependencies  # noqa
Example #3
    def test_get_retry_countdown(self):
        wf = Workflow()
        countdown = wf.get_retry_countdown()
        assert isinstance(countdown, int)

        # assumes default retry policy
        assert 10 <= countdown <= 30
Example #4
    def test_add_celery_chain(self, some_sigs):
        wf = Workflow()
        sigs = some_sigs

        chain = celery_canvas.chain(sigs[:-2])
        dependencies = [
            wf.add_signature(sigs[-2]),
            wf.add_signature(sigs[-1]),
        ]
        res = wf.add_celery_chain(chain, dependencies)

        assert res == [wf.nodes[sigs[-3].id]]

        # use build_dependency_asserts(wf) to regenerate
        # build_dependency_asserts(wf)
        assert "task-8" in wf.nodes["task-0"].dependencies  # noqa
        assert "task-9" in wf.nodes["task-0"].dependencies  # noqa
        assert "task-0" in wf.nodes["task-1"].dependencies  # noqa
        assert "task-1" in wf.nodes["task-2"].dependencies  # noqa
        assert "task-2" in wf.nodes["task-3"].dependencies  # noqa
        assert "task-3" in wf.nodes["task-4"].dependencies  # noqa
        assert "task-4" in wf.nodes["task-5"].dependencies  # noqa
        assert "task-5" in wf.nodes["task-6"].dependencies  # noqa
        assert "task-6" in wf.nodes["task-7"].dependencies  # noqa
        assert wf.nodes["task-8"].dependencies == {}  # noqa
        assert wf.nodes["task-9"].dependencies == {}  # noqa
Example #5
    def test_add_celery_group(self, some_sigs):
        wf = Workflow()
        sigs = some_sigs

        group = celery_canvas.group(sigs[:4])
        dependencies = [
            wf.add_signature(sigs[-2]),
            wf.add_signature(sigs[-1]),
        ]
        res = wf.add_celery_group(group, dependencies)
        assert set(res) == set([
            wf.nodes['task-0'],
            wf.nodes['task-1'],
            wf.nodes['task-2'],
            wf.nodes['task-3'],
        ])
        # use build_dependency_asserts(wf) to regenerate
        # build_dependency_asserts(wf)
        assert "task-8" in wf.nodes["task-0"].dependencies  # noqa
        assert "task-9" in wf.nodes["task-0"].dependencies  # noqa
        assert "task-8" in wf.nodes["task-1"].dependencies  # noqa
        assert "task-9" in wf.nodes["task-1"].dependencies  # noqa
        assert "task-8" in wf.nodes["task-2"].dependencies  # noqa
        assert "task-9" in wf.nodes["task-2"].dependencies  # noqa
        assert "task-8" in wf.nodes["task-3"].dependencies  # noqa
        assert "task-9" in wf.nodes["task-3"].dependencies  # noqa
        assert wf.nodes["task-8"].dependencies == {}  # noqa
        assert wf.nodes["task-9"].dependencies == {}  # noqa
Example #6
    def test_freeze(self, app):
        wf = Workflow()
        sig = wf.freeze()

        assert isinstance(sig, entities.Signature)
        assert sig.id
        assert sig.kwargs
        assert wf.processing_limit_ts
Example #7
    def get_canvas_order(self, canvas):
        workflow = Workflow()
        workflow.add_celery_canvas(canvas)
        result = workflow.apply_async()
        result.wait()

        logs_result = read_logs.delay()
        return self.parse_logs(logs_result.get())
Example #8
    def test_processing_limit(self):
        wf = Workflow()
        assert wf.max_processing_time == 604800

        wf.set_max_processing_time(1200)
        assert wf.max_processing_time == 1200

        wf2 = Workflow()
        assert wf2.max_processing_time == 604800
Example #9
    def test_set_state(self, wf, clear_wf_hooks):
        handler = mock.Mock()
        Workflow.connect('on_state_change')(handler)

        wf.set_state('INITIAL')
        handler.assert_not_called()

        wf.set_state('DUMMY')
        handler.assert_called_with(wf, 'DUMMY')
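Since Workflow.connect('on_state_change') returns a callable that registers the handler, a hook can also be attached with decorator syntax. A small usage sketch; the (workflow, state) handler signature is taken from the assert above, the handler body is purely illustrative:

@Workflow.connect('on_state_change')
def log_state_change(workflow, state):
    # called whenever set_state() actually changes the workflow state
    print('workflow %s moved to state %s' % (workflow.id, state))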
Example #10
    def test_get_retry_countdown(self):
        wf = Workflow()
        with mock.patch('celery_dyrygent.workflows.workflow.random') as mck:
            mck.randint.return_value = 777
            res = wf.get_retry_countdown()

            mck.randint.assert_called_with(10, 30)

            assert res == 777

            wf._active_tick = True
            assert wf.get_retry_countdown() == 2
Example #11
def register_workflow_processor(app, **decorator_kws):
    """
    Attach workflow tasks to given celery app instance
    """
    decorator_defaults = dict(
        name='workflow-processor',
        bind=True,
        max_retries=1,
    )
    if decorator_kws is not None:
        decorator_defaults.update(decorator_kws)

    task = app.task(**decorator_defaults)(workflow_processor)

    Workflow.set_workflow_processor_task(task)
    return task
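A minimal usage sketch for register_workflow_processor; the import paths, broker URL, and task below are assumptions, and extra keyword arguments are forwarded to app.task():

from celery import Celery
from celery_dyrygent.tasks import register_workflow_processor
from celery_dyrygent.workflows import Workflow

app = Celery('example', broker='memory://')

@app.task
def add(x, y):
    return x + y

# override a decorator default, e.g. max_retries
register_workflow_processor(app, max_retries=2)

wf = Workflow()
wf.add_celery_canvas(add.si(1, 2) | add.si(3, 4))
wf.apply_async()  # dispatches the workflow-processor task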
Example #12
    def test_simulate_run_group_group_fixed(self, some_sigs):
        wf = Workflow()
        group1 = celery_canvas.group(some_sigs[:4])
        group2 = celery_canvas.group(some_sigs[4:9])

        canvas = group1 | group2
        wf.add_celery_canvas(canvas)

        # use build_exec_asserts helper to rebuild asserts
        # build_exec_asserts(wf)
        assert wf.simulate_tick()
        assert wf.running == {
            'task-0': True,
            'task-1': True,
            'task-2': True,
            'task-3': True
        }  # noqa
        assert wf.simulate_tick()
        assert wf.running == {
            'task-4': True,
            'task-5': True,
            'task-6': True,
            'task-7': True,
            'task-8': True
        }  # noqa
        assert not wf.simulate_tick()
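Like build_dependency_asserts, the build_exec_asserts helper is not shown in these examples. A plausible sketch that replays the simulation and prints the corresponding asserts (note it consumes the workflow's simulated state):

def build_exec_asserts(wf):
    # Hypothetical helper: prints the simulate_tick()/running assert block
    # used in the tests above.
    while wf.simulate_tick():
        print('assert wf.simulate_tick()')
        print('assert wf.running == %r  # noqa' % wf.running)
    print('assert not wf.simulate_tick()')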
Example #13
    def test_add_empty_chain(self, some_sigs):
        """
            task -> chain([]) -> task
            Verify if dependencies are built correctly
        """
        wf = Workflow()

        c0 = some_sigs[0]
        empty_chain = celery_canvas.chain([])
        c1 = some_sigs[1]

        canvas = c0 | empty_chain | c1

        wf.add_celery_canvas(canvas)

        # use build_dependency_asserts(wf) to regenerate
        # build_dependency_asserts(wf)
        assert wf.nodes["task-0"].dependencies == {}  # noqa
        assert "task-0" in wf.nodes["task-1"].dependencies  # noqa
Example #14
    def test_to_from_dict(self, some_sigs):
        wf = Workflow()
        chain1 = celery_canvas.chain(some_sigs[:3])
        wf.add_celery_canvas(chain1)

        wf_dict = wf.to_dict()
        wf2 = Workflow.from_dict(wf_dict)

        assert wf.nodes.keys() == wf2.nodes.keys()

        # use build_exec_asserts helper to rebuild asserts
        # build_exec_asserts(wf)
        assert wf.simulate_tick()
        assert wf.running == {'task-0': True}  # noqa
        assert wf.simulate_tick()
        assert wf.running == {'task-1': True}  # noqa
        assert wf.simulate_tick()
        assert wf.running == {'task-2': True}  # noqa
        assert not wf.simulate_tick()
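Since to_dict()/from_dict() round-trip through plain dicts (which the processor task also passes around as task kwargs), a workflow can presumably be serialized to JSON between ticks; a small sketch using the same fixtures as the tests above:

import json

wf = Workflow()
wf.add_celery_canvas(celery_canvas.chain(some_sigs[:3]))

payload = json.dumps(wf.to_dict())            # persist or log the workflow
wf2 = Workflow.from_dict(json.loads(payload))
assert wf.nodes.keys() == wf2.nodes.keys()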
Example #15
    def test_add_complex(self, some_sigs):
        wf = Workflow()

        c0 = some_sigs[0]
        c1 = some_sigs[5]
        chain1 = celery_canvas.chain(some_sigs[1:3])
        chain2 = celery_canvas.chain(some_sigs[3:5])
        chord = celery_canvas.chord([chain1, chain2], c1)
        canvas = c0 | chord

        wf.add_celery_canvas(canvas)

        # use build_dependency_asserts(wf) to regenerate
        # build_dependency_asserts(wf)
        assert wf.nodes["task-0"].dependencies == {}  # noqa
        assert "task-0" in wf.nodes["task-1"].dependencies  # noqa
        assert "task-1" in wf.nodes["task-2"].dependencies  # noqa
        assert "task-0" in wf.nodes["task-3"].dependencies  # noqa
        assert "task-3" in wf.nodes["task-4"].dependencies  # noqa
        assert "task-2" in wf.nodes["task-5"].dependencies  # noqa
        assert "task-4" in wf.nodes["task-5"].dependencies  # noqa
Example #16
def workflow_processor(self, workflow_dict):
    workflow_dict['id'] = self.request.id
    wf = Workflow.from_dict(workflow_dict)
    still_running = wf.tick()

    if still_running:
        # we can retry infinitely due to processing limit timestamp
        self.request.retries -= 1
        self.retry(
            kwargs=dict(workflow_dict=wf.to_dict()),
            countdown=wf.get_retry_countdown(),
        )
Example #17
    def test_evaluate_processing_limit(self):
        wf = Workflow()
        wf.set_max_processing_time(1200)
        with mock.patch('celery_dyrygent.workflows.workflow.time') as mck:
            mck.time.return_value = 100
            wf.evaluate_processing_limit()
            assert wf.processing_limit_ts == 1300
Example #18
    def test_simulate_run_group_group(self, some_sigs):
        wf = Workflow()
        group1 = celery_canvas.group(some_sigs[:4])
        group2 = celery_canvas.group(some_sigs[4:9])

        # What celery does here is actually group() | group() -> single group.
        # This is rather unexpected, as group 2 should wait for all tasks from
        # group 1...
        canvas = group1 | group2
        wf.add_celery_canvas(canvas)

        # use build_exec_asserts helper to rebuild asserts
        # build_exec_asserts(wf)
        assert wf.simulate_tick()
        assert wf.running == {
            'task-8': True,
            'task-6': True,
            'task-7': True,
            'task-4': True,
            'task-5': True,
            'task-2': True,
            'task-3': True,
            'task-0': True,
            'task-1': True
        }  # noqa
        assert not wf.simulate_tick()
Example #19
    def test_from_dict_with_custom_payload(self):
        wf_dict = {
            'finished': {
                '1': False
            },
            'running': {
                '2': True
            },
            'nodes': {
                'some': 'data',
                'some2': 'data2'
            },
            'processing_limit_ts': 5000,
            'version': 1,
            'retry_policy': ['random', 10, 30],
            'stats': {
                'last_apply_async_tick': 0,
                'ticks': 25
            },
            'id': None,
            'state': 'RUNNING',
            'custom_payload': {
                'foo': 'bar'
            }
        }
        with mock.patch.object(WorkflowNode, 'from_dict') as mck:
            mck.return_value = 'some_result'
            wf = Workflow.from_dict(wf_dict)
            assert wf.state == 'RUNNING'
            assert wf.running == {'2': True}
            assert wf.finished == {'1': False}
            assert wf.version == 1
            assert wf.nodes == {
                'some': 'some_result',
                'some2': 'some_result',
            }
            assert wf.stats == {
                'last_apply_async_tick': 0,
                'ticks': 25,
                'consecutive_celery_error_ticks': 0,
            }

            assert wf.processing_limit_ts == 5000
            assert wf.custom_payload == {'foo': 'bar'}

            # order of calls may not be preserved in older versions of Python
            # as dict order was non-deterministic < 3.7
            mck.assert_has_calls(
                [
                    mock.call('data'),
                    mock.call('data2'),
                ],
                any_order=True,
            )
Example #20
    def test_add_empty_group(self, some_sigs):
        """
            task -> group([])
            Verify if dependencies are built correctly
        """
        wf = Workflow()

        c0, c1 = some_sigs[0:2]
        empty_group = celery_canvas.group([])
        empty_group2 = celery_canvas.group([])

        # celery optimizes group | task into chord
        # so let's use group | group to verify if dependencies
        # are propagated properly
        canvas = c0 | empty_group | empty_group2 | c1

        wf.add_celery_canvas(canvas)

        # use build_dependency_asserts(wf) to regenerate
        # build_dependency_asserts(wf)
        assert wf.nodes["task-0"].dependencies == {}  # noqa
        assert "task-0" in wf.nodes["task-1"].dependencies  # noqa
Example #21
    def test_schedule_node_exec_uses_task_options(self):
        wf = Workflow()
        task_cls = mock.Mock()
        signature = mock.Mock()
        node = mock.Mock()
        node.id = 'task_node'
        task_cls.subtask.return_value = signature
        signature.id = 'dummy'
        Workflow.set_workflow_processor_task(task_cls)

        options = {'option': True}
        wf.apply_async(options=options)
        wf.schedule_node_exec(node)

        signature.apply_async.assert_called_with()
        node.signature.apply_async.assert_called_with(**options)
Example #22
    def test_apply_async(self):
        wf = Workflow()
        task_cls = mock.Mock()
        signature = mock.Mock()
        task_cls.subtask.return_value = signature
        signature.id = 'dummy'
        Workflow.set_workflow_processor_task(task_cls)

        wf.apply_async()
        assert wf.id == 'dummy'

        # trickery: task_cls.subtask was called before the ID was assigned
        # to the workflow, while now wf.to_dict() has the id field set,
        # so just set it back to None
        wf.id = None
        task_cls.subtask.assert_called_with(kwargs=dict(
            workflow_dict=wf.to_dict()))
        signature.apply_async.assert_called()
Example #23
    def test_simulate_run_chain_chord_chain(self, some_sigs):
        wf = Workflow()
        c0 = some_sigs[0]
        c1 = some_sigs[5]
        chain1 = celery_canvas.chain(some_sigs[1:3])
        chain2 = celery_canvas.chain(some_sigs[3:5])
        chord = celery_canvas.chord([chain1, chain2], c1)
        canvas = c0 | chord

        wf.add_celery_canvas(canvas)

        # use build_exec_asserts helper to rebuild asserts
        # build_exec_asserts(wf)
        assert wf.simulate_tick()
        assert wf.running == {'task-0': True}  # noqa
        assert wf.simulate_tick()
        assert wf.running == {'task-3': True, 'task-1': True}  # noqa
        assert wf.simulate_tick()
        assert wf.running == {'task-4': True, 'task-2': True}  # noqa
        assert wf.simulate_tick()
        assert wf.running == {'task-5': True}  # noqa
        assert not wf.simulate_tick()
Example #24
    def test_from_dict(self):
        wf_dict = {
            'finished': {
                '1': False
            },
            'running': {
                '2': True
            },
            'nodes': {
                'some': 'data',
                'some2': 'data2'
            },
            'processing_limit_ts': 5000,
            'version': 1,
            'retry_policy': ['random', 10, 30],
            'stats': {
                'last_apply_async_tick': 0,
                'ticks': 25
            },
            'id': None,
            'state': 'RUNNING',
        }
        with mock.patch.object(WorkflowNode, 'from_dict') as mck:
            mck.return_value = 'some_result'
            wf = Workflow.from_dict(wf_dict)
            assert wf.state == 'RUNNING'
            assert wf.running == {'2': True}
            assert wf.finished == {'1': False}
            assert wf.version == 1
            assert wf.nodes == {
                'some': 'some_result',
                'some2': 'some_result',
            }
            assert wf.stats == {
                'last_apply_async_tick': 0,
                'ticks': 25,
                'consecutive_celery_error_ticks': 0,
            }

            assert wf.processing_limit_ts == 5000
            assert wf.custom_payload == {}

            mck.assert_has_calls([
                mock.call('data'),
                mock.call('data2'),
            ])
Example #25
    def test_return_value(self, test_name):
        canvas = return_value_task.s(test_name)

        workflow = Workflow()
        workflow.add_celery_canvas(canvas)
        workflow.apply_async().wait()

        task_id = list(workflow.nodes.keys())[0]
        logs_result = read_logs.delay()
        results = self.parse_logs(logs_result.get())[test_name]

        assert task_id == results[0]
Example #26
    def test_connect_chaining(self, clear_wf_hooks):
        hook = mock.Mock()
        Workflow.connect('after_active_tick')(
            Workflow.connect('on_finish')(hook))
        assert Workflow.hooks['after_active_tick'] == [hook]
        assert Workflow.hooks['on_finish'] == [hook]
Example #27
    def test_set_retry_policy(self):
        wf = Workflow()
        wf.set_retry_policy('random', 30, 60)
        assert wf.retry_policy == ['random', 30, 60]
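Combining the tuning knobs exercised in the tests above, a configuration sketch could look like:

wf = Workflow()
wf.set_retry_policy('random', 30, 60)  # processor retry countdown drawn from 30-60s
wf.set_max_processing_time(3600)       # processing limit of 1 hour (default is 604800s, i.e. 7 days)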
Example #28
    def test_add_celery_signature(self):
        wf = Workflow()
        sig = celery_canvas.Signature()
        res = wf.add_celery_signature(sig)
        node = res[0]
        assert node.id == sig.id
Example #29
    def test_simulate_run_chain_group_chain(self, some_sigs):
        wf = Workflow()
        chain1 = celery_canvas.chain(some_sigs[:3])
        group1 = celery_canvas.group(some_sigs[3:7])
        chain2 = celery_canvas.chain(some_sigs[7:9])

        canvas = chain1 | group1 | chain2
        wf.add_celery_canvas(canvas)

        # use build_exec_asserts helper to rebuild asserts
        # build_exec_asserts(wf)
        assert wf.simulate_tick()
        assert wf.running == {'task-0': True}  # noqa
        assert wf.simulate_tick()
        assert wf.running == {'task-1': True}  # noqa
        assert wf.simulate_tick()
        assert wf.running == {'task-2': True}  # noqa
        assert wf.simulate_tick()
        assert wf.running == {
            'task-6': True,
            'task-4': True,
            'task-5': True,
            'task-3': True
        }  # noqa
        assert wf.simulate_tick()
        assert wf.running == {'task-7': True}  # noqa
        assert wf.simulate_tick()
        assert wf.running == {'task-8': True}  # noqa
        assert not wf.simulate_tick()
Example #30
    def test_connect_invalid_signal(self, clear_wf_hooks):
        with pytest.raises(WorkflowException, match=r'invalid signal'):
            Workflow.connect('invalid')(mock.Mock())