Example #1
 def analyze(old_state, new_state, event):
     next_nodes = set()
     while memory.done:
         fut = memory.done.pop()
         try:
             node, event, result = fut.result()
             retain = self._completer.complete(node, event, result)
             if retain and isinstance(result, misc.Failure):
                 memory.failures.append(result)
         except Exception:
             memory.failures.append(misc.Failure())
         else:
             try:
                 more_nodes = self._analyzer.get_next_nodes(node)
             except Exception:
                 memory.failures.append(misc.Failure())
             else:
                 next_nodes.update(more_nodes)
     if self.runnable() and next_nodes and not memory.failures:
         memory.next_nodes.update(next_nodes)
         return 'schedule'
     elif memory.not_done:
         return 'wait'
     else:
         return 'finished'
Example #2
 def test_two_recaptured_neq(self):
     captured = _captured_failure('Woot!')
     fail_obj = misc.Failure(exception_str=captured.exception_str,
                             traceback_str=captured.traceback_str,
                             exc_type_names=list(captured))
     new_exc_str = captured.exception_str.replace('Woot', 'w00t')
     fail_obj2 = misc.Failure(exception_str=new_exc_str,
                              traceback_str=captured.traceback_str,
                              exc_type_names=list(captured))
     self.assertNotEquals(fail_obj, fail_obj2)
     self.assertFalse(fail_obj2.matches(fail_obj))
Example #3
    def _process_request(self, request, message):
        """Process request in separate thread and reply back."""
        # parse broker message first to get the `reply_to` and the `task_uuid`
        # parameters to have possibility to reply back
        try:
            reply_to, task_uuid = self._parse_message(message)
        except ValueError as e:
            LOG.error("Failed to parse broker message: %s" % e)
            return
        else:
            # prepare task progress callback
            progress_callback = functools.partial(
                self._on_update_progress, reply_to, task_uuid)
            # prepare reply callback
            reply_callback = functools.partial(
                self._reply, reply_to, task_uuid)

        # parse request to get task name, action and action arguments
        try:
            task, action, action_args = self._parse_request(**request)
            action_args.update(task_uuid=task_uuid,
                               progress_callback=progress_callback)
        except ValueError as e:
            LOG.error("Failed to parse request: %s" % e)
            reply_callback(result=pu.failure_to_dict(misc.Failure()))
            return

        # get task endpoint
        try:
            endpoint = self._endpoints[task]
        except KeyError:
            LOG.error("The '%s' task endpoint does not exist." % task)
            reply_callback(result=pu.failure_to_dict(misc.Failure()))
            return
        else:
            reply_callback(state=pr.RUNNING)

        # perform task action
        try:
            result = getattr(endpoint, action)(**action_args)
        except Exception as e:
            LOG.error("The %s task execution failed: %s" % (endpoint, e))
            reply_callback(result=pu.failure_to_dict(misc.Failure()))
        else:
            if isinstance(result, misc.Failure):
                reply_callback(result=pu.failure_to_dict(result))
            else:
                reply_callback(state=pr.SUCCESS, result=result)
Example #4
    def test_task_detail_with_failure(self):
        lb_id = uuidutils.generate_uuid()
        lb_name = 'lb-%s' % (lb_id)
        lb = logbook.LogBook(name=lb_name, uuid=lb_id)
        fd = logbook.FlowDetail('test', uuid=uuidutils.generate_uuid())
        lb.add(fd)
        td = logbook.TaskDetail("detail-1", uuid=uuidutils.generate_uuid())

        try:
            raise RuntimeError('Woot!')
        except Exception:
            td.failure = misc.Failure()

        fd.add(td)

        with contextlib.closing(self._get_connection()) as conn:
            conn.save_logbook(lb)
            conn.update_flow_details(fd)
            conn.update_task_details(td)

        # Read failure back
        with contextlib.closing(self._get_connection()) as conn:
            lb2 = conn.get_logbook(lb_id)
        fd2 = lb2.find(fd.uuid)
        td2 = fd2.find(td.uuid)
        failure = td2.failure
        self.assertEqual(failure.exception_str, 'Woot!')
        self.assertIs(failure.check(RuntimeError), RuntimeError)
        self.assertEqual(failure.traceback_str, td.failure.traceback_str)
Example #5
 def _submit_task(self,
                  task,
                  task_uuid,
                  action,
                  arguments,
                  progress_callback,
                  timeout=pr.REQUEST_TIMEOUT,
                  **kwargs):
     """Submit task request to workers."""
     remote_task = self._store_remote_task(
         rt.RemoteTask(task, task_uuid, action, arguments,
                       progress_callback, timeout, **kwargs))
     try:
         # get task's workers topic to send request to
         try:
             topic = self._workers_info[remote_task.name]
         except KeyError:
             raise exc.NotFound("Workers topic not found for the '%s' "
                                "task." % remote_task.name)
         else:
             # publish request
             request = remote_task.request
             LOG.debug("Sending request: %s" % request)
             self._proxy.publish(request,
                                 remote_task.uuid,
                                 routing_key=topic,
                                 reply_to=self._uuid)
     except Exception as e:
         LOG.error("Failed to submit the '%s' task: %s" % (remote_task, e))
         self._remove_remote_task(remote_task)
         remote_task.set_result(misc.Failure())
     return remote_task.result
Example #6
 def call_fn(action):
     try:
         fn(action)
     except Exception:
         return misc.Failure()
     else:
         return None
Example #7
 def _execute_retry(kwargs, failures):
     kwargs['flow_failures'] = failures
     try:
         result = retry.revert(**kwargs)
     except Exception:
         result = misc.Failure()
     return (retry, ex.REVERTED, result)
Example #8
 def test_save_and_get_failure(self):
     fail = misc.Failure(exc_info=(RuntimeError, RuntimeError(), None))
     s = self._get_storage()
     s.add_task('42', 'my task')
     s.save('42', fail, states.FAILURE)
     self.assertEquals(s.get('42'), fail)
     self.assertEquals(s.get_task_state('42'), states.FAILURE)
Example #9
 def test_recaptured_not_eq(self):
     captured = _captured_failure('Woot!')
     fail_obj = misc.Failure(exception_str=captured.exception_str,
                             traceback_str=captured.traceback_str,
                             exc_type_names=list(captured))
     self.assertFalse(fail_obj == captured)
     self.assertTrue(fail_obj != captured)
     self.assertTrue(fail_obj.matches(captured))
Example #10
def _execute_task(task, arguments, progress_callback):
    with task.autobind('update_progress', progress_callback):
        try:
            result = task.execute(**arguments)
        except Exception:
            # NOTE(imelnikov): wrap current exception with Failure
            # object and return it.
            result = misc.Failure()
    return (task, EXECUTED, result)
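A detail worth noting in the example above: misc.Failure() with no arguments captures the exception that is currently being handled, so it has to be constructed inside the except block. Below is a minimal, self-contained sketch of that capture-and-return pattern; CapturedFailure and call_and_capture are illustrative names, not part of taskflow.

import sys
import traceback


class CapturedFailure(object):
    # Illustrative stand-in for misc.Failure(); not the taskflow
    # implementation. It snapshots the exception that is active at
    # construction time.
    def __init__(self):
        exc_type, exc_value, exc_tb = sys.exc_info()
        if exc_type is None:
            raise RuntimeError("no active exception to capture")
        self.exc_type_names = [exc_type.__name__]
        self.exception_str = str(exc_value)
        self.traceback_str = ''.join(traceback.format_tb(exc_tb))


def call_and_capture(fn, *args, **kwargs):
    # Run fn(); on success return its result, on failure return a captured
    # failure object instead of letting the exception escape (the same
    # shape as _execute_task above).
    try:
        return fn(*args, **kwargs)
    except Exception:
        return CapturedFailure()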
Example #11
 def test_unknown_argument(self):
     with self.assertRaises(TypeError) as ctx:
         misc.Failure(
             exception_str='Woot!',
             traceback_str=None,
             exc_type_names=['Exception'],
             hi='hi there')
     expected = "Failure.__init__ got unexpected keyword argument(s): hi"
     self.assertEquals(str(ctx.exception), expected)
Example #12
 def _run(self):
     self._change_state(states.RUNNING)
     try:
         state = self._root.execute(self)
     except Exception:
         self._change_state(states.FAILURE)
         self._revert(misc.Failure())
     else:
         self._change_state(state)
Example #13
 def test_failure_copy_recaptured(self):
     captured = _captured_failure('Woot!')
     fail_obj = misc.Failure(exception_str=captured.exception_str,
                             traceback_str=captured.traceback_str,
                             exc_type_names=list(captured))
     copied = fail_obj.copy()
     self.assertIsNot(fail_obj, copied)
     self.assertEquals(fail_obj, copied)
     self.assertFalse(fail_obj != copied)
     self.assertTrue(fail_obj.matches(copied))
Example #14
    def test_get_failure_from_reverted_task(self):
        fail = misc.Failure(exc_info=(RuntimeError, RuntimeError(), None))
        s = self._get_storage()
        s.add_task('42', 'my task')
        s.save('42', fail, states.FAILURE)

        s.set_task_state('42', states.REVERTING)
        self.assertEquals(s.get('42'), fail)

        s.set_task_state('42', states.REVERTED)
        self.assertEquals(s.get('42'), fail)
Example #15
    def test_flattening(self):
        f1 = _captured_failure('Wrap me')
        f2 = _captured_failure('Wrap me, too')
        f3 = _captured_failure('Woot!')
        try:
            raise exceptions.WrappedFailure([f1, f2])
        except Exception:
            fail_obj = misc.Failure()

        wf = exceptions.WrappedFailure([fail_obj, f3])
        self.assertEquals(list(wf), [f1, f2, f3])
Example #16
@contextlib.contextmanager
def wrap_all_failures():
    """Convert any exceptions to WrappedFailure.

    When you expect several failures, it may be convenient
    to wrap any exception with WrappedFailure in order to
    unify error handling.
    """
    try:
        yield
    except Exception:
        raise exceptions.WrappedFailure([misc.Failure()])
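A hedged usage sketch for the context manager above: assuming wrap_all_failures from Example #16 is in scope and that exceptions.WrappedFailure is iterable over the contained Failure objects (as Example #15 shows), a caller might consolidate error handling roughly like this; risky_operation and the import path are assumptions.

from taskflow import exceptions  # assumed module path


def risky_operation():
    # Hypothetical operation that fails.
    raise RuntimeError('Woot!')


try:
    with wrap_all_failures():
        risky_operation()
except exceptions.WrappedFailure as wf:
    # Iterating a WrappedFailure yields the contained Failure objects
    # (compare Example #15), so every captured failure can be inspected.
    for failure in wf:
        print(failure.exception_str)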
Example #17
def _revert_task(task, arguments, result, failures, progress_callback):
    kwargs = arguments.copy()
    kwargs['result'] = result
    kwargs['flow_failures'] = failures
    with task.autobind('update_progress', progress_callback):
        try:
            result = task.revert(**kwargs)
        except Exception:
            # NOTE(imelnikov): wrap current exception with Failure
            # object and return it.
            result = misc.Failure()
    return (task, REVERTED, result)
Example #18
def failure_from_dict(data):
    """Restore misc.Failure object from dict.

    The dict should be similar to what failure_to_dict() produces.
    """
    if not data:
        return None
    version = data.pop('version', None)
    if version != 1:
        raise ValueError('Invalid version of saved Failure object: %r' %
                         version)
    return misc.Failure(**data)
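Given the version check above, a round trip through the companion failure_to_dict() (seen as pu.failure_to_dict in Example #3) should be possible. The sketch below is an assumption-laden illustration: it presumes both functions live in the same module and that the produced dict carries a version of 1 plus the exception_str/traceback_str/exc_type_names fields used elsewhere in these examples.

# Round-trip sketch; assumes failure_to_dict()/failure_from_dict() and
# misc.Failure are importable as shown in the surrounding examples.
try:
    raise RuntimeError('Woot!')
except Exception:
    original = misc.Failure()

data = failure_to_dict(original)      # expected to include 'version': 1
restored = failure_from_dict(data)

# The restored Failure no longer carries exc_info, but it should still
# match the original, much like the re-created failures in Example #9.
assert restored.matches(original)
assert restored.exception_str == original.exception_str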
Example #19
 def execute(self, engine):
     if engine.storage.get_task_state(self.uuid) == states.SUCCESS:
         return
     try:
         kwargs = engine.storage.fetch_mapped_args(self._args_mapping)
         self._change_state(engine, states.RUNNING)
         result = self._task.execute(**kwargs)
     except Exception:
         failure = misc.Failure()
         self._update_result(engine, states.FAILURE, failure)
         failure.reraise()
     else:
         self._update_result(engine, states.SUCCESS, result)
Example #20
def _revert_task(task, arguments, result, failures, progress_callback):
    kwargs = arguments.copy()
    kwargs[_task.REVERT_RESULT] = result
    kwargs[_task.REVERT_FLOW_FAILURES] = failures
    with task.autobind('update_progress', progress_callback):
        try:
            task.pre_revert()
            result = task.revert(**kwargs)
        except Exception:
            # NOTE(imelnikov): wrap current exception with Failure
            # object and return it.
            result = misc.Failure()
        finally:
            task.post_revert()
    return (task, REVERTED, result)
Example #21
 def test_connect(failures):
     try:
         # See if we can make a connection happen.
         #
         # NOTE(harlowja): even though we can connect once, it does not
         # mean that we will be able to connect in the future, so this is
         # more of a sanity test and not complete connection insurance.
         with contextlib.closing(engine.connect()):
             pass
     except sa_exc.OperationalError as ex:
         if _is_db_connection_error(str(ex.args[0])):
             failures.append(misc.Failure())
             return False
     return True
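Building on the sanity check above, one plausible way to use such a test_connect helper is a retry loop that collects the captured connection failures and finally re-raises them. The loop below is illustrative only (the wrapper name, attempt count, and delay are assumptions); Failure.reraise_if_any is the classmethod also used in Example #26.

import time


def wait_for_connection(max_attempts=5, delay=1.0):
    # Hypothetical wrapper around test_connect from Example #21.
    failures = []
    for _ in range(max_attempts):
        if test_connect(failures):
            return
        time.sleep(delay)
    # Surface the captured connection failures to the caller.
    misc.Failure.reraise_if_any(failures)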
Example #22
 def execute(self, engine):
     if not self._change_state_update_task(engine, states.RUNNING, 0.0):
         return
     with _autobind(self._task,
                    'update_progress',
                    self._on_update_progress,
                    engine=engine):
         try:
             kwargs = engine.storage.fetch_mapped_args(self._task.rebind)
             result = self._task.execute(**kwargs)
         except Exception:
             failure = misc.Failure()
             self._change_state(engine, states.FAILURE, result=failure)
             failure.reraise()
     self._change_state_update_task(engine,
                                    states.SUCCESS,
                                    1.0,
                                    result=result)
Example #23
    def schedule(self, nodes):
        """Schedules the provided nodes for *future* completion.

        This method should schedule a future for each node provided and return
        a set of those futures to be waited on (or used for other similar
        purposes). It should also return any failure objects representing
        scheduling failures that occurred during this scheduling process.
        """
        futures = set()
        for node in nodes:
            try:
                futures.add(self._schedule_node(node))
            except Exception:
                # Immediately stop scheduling future work so that we can
                # exit execution early (rather than later) if a single task
                # fails to schedule correctly.
                return (futures, [misc.Failure()])
        return (futures, [])
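One plausible consumer of the (futures, failures) contract described in the docstring above; this is an illustrative sketch rather than taskflow code, and the scheduler argument plus the misc import path are assumptions.

from concurrent import futures as _futures

from taskflow.utils import misc  # assumed module path


def run_and_collect(scheduler, nodes):
    # Schedule everything, then wait on whatever actually got scheduled,
    # folding scheduling failures and execution failures into one list.
    scheduled, failures = scheduler.schedule(nodes)
    done, _not_done = _futures.wait(scheduled)
    for fut in done:
        try:
            fut.result()
        except Exception:
            failures.append(misc.Failure())
    return failures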
Example #24
 def setUp(self):
     super(ReCreatedFailureTestCase, self).setUp()
     fail_obj = _captured_failure('Woot!')
     self.fail_obj = misc.Failure(exception_str=fail_obj.exception_str,
                                  traceback_str=fail_obj.traceback_str,
                                  exc_type_names=list(fail_obj))
Example #25
def _captured_failure(msg):
    try:
        raise RuntimeError(msg)
    except Exception:
        return misc.Failure()
Example #26
    def execute(self, engine):
        """This action executes the provided graph in parallel by selecting
        nodes which can run (those which have there dependencies satisfied
        or those with no dependencies) and submitting them to the executor
        to be ran, and then after running this process will be repeated until
        no more nodes can be ran (or a failure has a occured and all nodes
        were stopped from further running).
        """
        # A deque is a thread safe push/pop/popleft/append implementation
        all_futures = collections.deque()
        executor = engine.executor
        has_failed = threading.Event()
        deps_lock = threading.RLock()
        deps_counter = self._get_nodes_dependencies_count()
        was_suspended = threading.Event()

        def submit_followups(node):
            # Mutating the deps_counter isn't thread safe.
            with deps_lock:
                to_execute = self._resolve_dependencies(node, deps_counter)
            submit_count = 0
            for n in to_execute:
                try:
                    all_futures.append(executor.submit(run_node, n))
                    submit_count += 1
                except RuntimeError:
                    # Someone shut down the executor while we are still
                    # using it; get out as quickly as we can...
                    has_failed.set()
                    break
            return submit_count

        def run_node(node):
            if has_failed.is_set():
                # Someone failed, don't even bother running.
                return
            action = self._action_mapping[node]
            try:
                if engine.is_running:
                    action.execute(engine)
                else:
                    was_suspended.set()
                    return
            except Exception:
                # Make sure others don't continue working (although they may
                # be already actively working, but you can't stop that anyway).
                has_failed.set()
                raise
            if has_failed.is_set():
                # Someone else failed, don't even bother submitting any
                # followup jobs.
                return
            # NOTE(harlowja): the future itself will not return until after it
            # submits followup tasks; this keeps the parent thread waiting for
            # more results, since the all_futures deque will not be empty until
            # everyone stops submitting followups.
            submitted = submit_followups(node)
            LOG.debug("After running %s, %s followup actions were submitted",
                      node, submitted)

        # Nothing to execute in the first place
        if not deps_counter:
            return st.SUCCESS

        # Ensure that we obtain the lock, just in case the submitted functions
        # immediately start submitting their own jobs (which could happen if
        # they are very quick).
        with deps_lock:
            to_execute = self._browse_nodes_to_execute(deps_counter)
            for n in to_execute:
                try:
                    all_futures.append(executor.submit(run_node, n))
                except RuntimeError:
                    # Someone shut down the executor while we are still using
                    # it; get out as quickly as we can...
                    break

        # Keep consuming the futures until there are no more futures left,
        # so that we can collect their failures. Notice that
        # results are not captured, as results of tasks go into storage and
        # do not get returned here.
        failures = []
        while len(all_futures):
            # Take in FIFO order, not in LIFO order.
            f = all_futures.popleft()
            try:
                f.result()
            except futures.CancelledError:
                # TODO(harlowja): can we use the cancellation feature to
                # actually achieve cancellation in taskflow??
                pass
            except Exception:
                failures.append(misc.Failure())
        misc.Failure.reraise_if_any(failures)
        if was_suspended.is_set():
            return st.SUSPENDED
        else:
            return st.SUCCESS
Example #27
 def _execute_retry(kwargs):
     try:
         result = retry.execute(**kwargs)
     except Exception:
         result = misc.Failure()
     return (retry, ex.EXECUTED, result)