Example #1
def legacy_workflow_stepper():
    """
    Function to tick the legacy workflows
    """
    from datetime import timedelta
    from aiida.daemon.timestamps import set_timestamp_workflow_stepper, get_timestamp_workflow_stepper
    from aiida.daemon.workflowmanager import execute_steps
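    # note: 'logger' is not defined in this snippet; in the original module it
    # is a module-level logger for the daemon tasks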

    logger.debug('Checking for workflows to manage')
    # RUDIMENTARY way to check if this task is already running (to avoid acting
    # again and again on the same workflow steps)
    try:
        stepper_is_running = (
            get_timestamp_workflow_stepper(when='stop') -
            get_timestamp_workflow_stepper(when='start')) <= timedelta(0)
    except TypeError:
        # When some timestamps are None (undefined)
        stepper_is_running = (
            get_timestamp_workflow_stepper(when='stop') is None
            and get_timestamp_workflow_stepper(when='start') is not None)

    if not stepper_is_running:
        # The previous wf manager stopped already -> we can run a new one
        set_timestamp_workflow_stepper(when='start')
        logger.debug('Running execute_steps')
        execute_steps()
        set_timestamp_workflow_stepper(when='stop')
    else:
        logger.debug('Execute_steps already running')
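The "stepper is running" test above simply compares the last recorded stop and start timestamps. Below is a minimal standalone sketch of the same check, using plain datetime values instead of AiiDA's timestamp helpers (the stepper_is_running, last_start and last_stop names are illustrative, not part of AiiDA):

from datetime import datetime, timedelta

def stepper_is_running(last_start, last_stop):
    """Consider a run in progress if it started but has not recorded a stop since."""
    try:
        # stop - start <= 0 means the last stop predates (or equals) the last start
        return (last_stop - last_start) <= timedelta(0)
    except TypeError:
        # one of the timestamps is None: running only if a start exists without a stop
        return last_stop is None and last_start is not None

print(stepper_is_running(datetime.now() - timedelta(minutes=1), None))               # True: started, never stopped
print(stepper_is_running(datetime(2018, 1, 1, 12, 0), datetime(2018, 1, 1, 12, 5)))  # False: stopped after starting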
Example #2
    def test_failing_calc_in_wf(self):
        """
        This test checks that a workflow (but also a workflow with
        sub-workflows) that has an exception at one of its steps stops
        properly and it is not left as RUNNING.
        """
        from aiida.daemon.workflowmanager import execute_steps
        from aiida.workflows.test import (FailingWFTestSimple,
                                          FailingWFTestSimpleWithSubWF)

        try:
            # Testing the error propagation of a simple workflow
            wf = FailingWFTestSimple()
            wf.store()
            step_no = 0
            wf.start()
            while wf.is_running():
                execute_steps()
                step_no += 1
                self.assertLess(step_no, 5, "This workflow should have stopped "
                                            "since it is failing")

            # Testing the error propagation of a workflow with subworkflows
            wf = FailingWFTestSimpleWithSubWF()
            wf.store()

            step_no = 0
            wf.start()
            while wf.is_running():
                execute_steps()
                step_no += 1
                self.assertLess(step_no, 5, "This workflow should have stopped "
                                            "since it is failing")
        finally:
            pass
Example #3
def manual_tick_all():
    from aiida.daemon.execmanager import submit_jobs, update_jobs, retrieve_jobs
    from aiida.work.daemon import tick_workflow_engine
    from aiida.daemon.workflowmanager import execute_steps
    submit_jobs()
    update_jobs()
    retrieve_jobs()
    execute_steps()  # legacy workflows
    tick_workflow_engine()
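In tests or an interactive shell, this manual ticking is usually driven in a loop until the workflow of interest finishes, mirroring the "while wf.is_running(): execute_steps()" pattern of the other examples. A hypothetical sketch (the WorkflowDemo import path is an assumption; substitute whatever legacy workflow class your tree provides):

# Hypothetical driver: keep ticking the whole engine until one legacy workflow finishes
from aiida.workflows.test import WorkflowDemo  # assumed location of a demo workflow

wf = WorkflowDemo()
wf.store()
wf.start()
while wf.is_running():
    manual_tick_all()  # submit, update, retrieve, then step legacy and new workflows

print(wf.get_results())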
Example #4
    def test_failing_calc_in_wf(self):
        """
        This test checks that a workflow (but also a workflow with
        sub-workflows) that has an exception at one of its steps stops
        properly and it is not left as RUNNING.
        """
        import logging
        from aiida.daemon.workflowmanager import execute_steps
        from aiida.workflows.test import (FailingWFTestSimple,
                                          FailingWFTestSimpleWithSubWF)

        handler = None  # make sure the name exists for the finally clause below
        try:
            # First of all, I re-enable logging in case it was disabled by
            # mistake by a previous test (e.g. one that disables and reenables
            # again, but that failed)
            logging.disable(logging.NOTSET)
            # Temporarily disable logging to the stream handler (i.e. screen)
            # because otherwise fix_calc_states will print warnings
            handler = next((h for h in logging.getLogger('aiida').handlers
                            if isinstance(h, logging.StreamHandler)), None)
            if handler:
                original_level = handler.level
                handler.setLevel(logging.ERROR)

            # Testing the error propagation of a simple workflow
            wf = FailingWFTestSimple()
            wf.store()
            step_no = 0
            wf.start()
            while wf.is_running():
                execute_steps()
                step_no += 1
                self.assertLess(
                    step_no, 5, "This workflow should have stopped "
                    "since it is failing")

            # Testing the error propagation of a workflow with subworkflows
            wf = FailingWFTestSimpleWithSubWF()
            wf.store()

            step_no = 0
            wf.start()
            while wf.is_running():
                execute_steps()
                step_no += 1
                self.assertLess(
                    step_no, 5, "This workflow should have stopped "
                    "since it is failing")
        finally:
            if handler:
                handler.setLevel(original_level)
Example #5
    def test_old_wf_results(self):
        wf = WorkflowDemo()
        wf.start()
        while wf.is_running():
            execute_steps()

        class _TestWf(WorkChain):
            @classmethod
            def define(cls, spec):
                super(_TestWf, cls).define(spec)
                spec.outline(cls.start, cls.check)

            def start(self):
                return ToContext(res=Outputs(legacy_workflow(wf.pk)))

            def check(self):
                assert set(self.ctx.res) == set(wf.get_results())

        _TestWf.new_instance().run_until_complete()
Example #6
    def test_call_old_wf(self):
        wf = WorkflowDemo()
        wf.start()
        while wf.is_running():
            execute_steps()

        class _TestWf(WorkChain):
            @classmethod
            def define(cls, spec):
                super(_TestWf, cls).define(spec)
                spec.outline(cls.start, cls.check)

            def start(self):
                return ToContext(wf=legacy_workflow(wf.pk))

            def check(self):
                assert self.ctx.wf is not None

        _TestWf.new_instance().run_until_complete()
Example #7
    def test_call_old_wf(self):
        wf = WorkflowDemo()
        wf.start()
        while wf.is_running():
            execute_steps()

        class _TestWf(WorkChain):
            @classmethod
            def define(cls, spec):
                super(_TestWf, cls).define(spec)
                spec.outline(cls.begin, cls.check)

            def begin(self):
                return ToContext(wf=wf)

            def check(self):
                assert self.ctx.wf is not None

        run_and_check_success(_TestWf)
Example #8
    def test_old_wf_results(self):
        wf = WorkflowDemo()
        wf.start()
        while wf.is_running():
            execute_steps()

        test_case = self

        class _TestWf(WorkChain):
            @classmethod
            def define(cls, spec):
                super(_TestWf, cls).define(spec)
                spec.outline(cls.begin, cls.check)

            def begin(self):
                return ToContext(res=wf)

            def check(self):
                test_case.assertEqual(self.ctx.res.pk, wf.pk)

        run_and_check_success(_TestWf)
Example #9
    def test_call_on_wf_finish(self):
        loop = self.runner.loop
        future = plumpy.Future()

        # Need to start() so it's stored
        wf = WorkflowDemo()
        wf.start()

        def wf_done(pk):
            self.assertEqual(pk, wf.pk)
            loop.stop()
            future.set_result(True)

        self.runner.call_on_legacy_workflow_finish(wf.pk, wf_done)

        # Run the wf
        while wf.is_running():
            execute_steps()

        self._run_loop_for(10.)
        self.assertTrue(future.result())
Example #10
def workflow_stepper():  # daemon for legacy workflow
    from aiida.daemon.workflowmanager import execute_steps
    print "aiida.daemon.tasks.workflowmanager:  Checking for workflows to manage"
    # RUDIMENTARY way to check if this task is already running (to avoid acting
    # again and again on the same workflow steps)
    try:
        stepper_is_running = (
            get_last_daemon_timestamp('workflow', when='stop') -
            get_last_daemon_timestamp('workflow', when='start')) <= timedelta(0)
    except TypeError:
        # when some timestamps are None (undefined)
        stepper_is_running = (
            get_last_daemon_timestamp('workflow', when='stop') is None and
            get_last_daemon_timestamp('workflow', when='start') is not None)

    if not stepper_is_running:
        set_daemon_timestamp(task_name='workflow', when='start')
        # the previous wf manager stopped already -> we can run a new one
        print "aiida.daemon.tasks.workflowmanager: running execute_steps"
        execute_steps()
        set_daemon_timestamp(task_name='workflow', when='stop')
    else:
        print "aiida.daemon.tasks.workflowmanager: execute_steps already running"
Example #11
def workflow_stepper():  # daemon for legacy workflow
    configure_logging(daemon=True, daemon_log_file=DAEMON_LOG_FILE)
    from aiida.daemon.workflowmanager import execute_steps
    LOGGER.info('Checking for workflows to manage')
    # RUDIMENTARY way to check if this task is already running (to avoid acting
    # again and again on the same workflow steps)
    try:
        stepper_is_running = (get_last_daemon_timestamp(
            'workflow', when='stop') - get_last_daemon_timestamp(
                'workflow', when='start')) <= timedelta(0)
    except TypeError:
        # when some timestamps are None (undefined)
        stepper_is_running = (
            get_last_daemon_timestamp('workflow', when='stop') is None and
            get_last_daemon_timestamp('workflow', when='start') is not None)

    if not stepper_is_running:
        set_daemon_timestamp(task_name='workflow', when='start')
        # the previous wf manager stopped already -> we can run a new one
        LOGGER.info('running execute_steps')
        execute_steps()
        set_daemon_timestamp(task_name='workflow', when='stop')
    else:
        LOGGER.info('execute_steps already running')
Example #12
def workflow_stepper():  # daemon for legacy workflow
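    # NOTE: unlike the variants above, this version does not check whether a
    # previous stepper run is still in progress before calling execute_steps()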
    from aiida.daemon.workflowmanager import execute_steps
    print "aiida.daemon.tasks.workflowmanager:  Checking for workflows to manage"
    set_daemon_timestamp(task_name='workflow', when='start')
    execute_steps()
    set_daemon_timestamp(task_name='workflow', when='stop')