Esempio n. 1
0
def make_traverse_cases():
    """Build (input, (traversed, flattened)) fixtures for traversal tests."""
    # Imported lazily: pytest attempts to look for tests derefing the
    # results, causing trouble.
    from flowy.result import error, placeholder, result

    res0 = result(u'r0', 0)
    err_a = error('err1', 1)
    err_b = error('err2', 2)
    err_c = error('err3', 3)
    res4 = result(4, 4)
    hole = placeholder()

    # Deeply nested mix of proxies, containers, and plain values.
    nested_in = [err_a, [err_b, err_c], (res4, res0), [1, 2, [3]],
                 {res4: ['xyz', hole]}]
    nested_out = [err_a, [err_b, err_c], [4, u'r0'], [1, 2, [3]],
                  {4: ['xyz', hole]}]
    nested_flat = (err_a, err_b, err_c, 4, u'r0', 1, 2, 3, 4, 'xyz', hole)
    # Proxies inside dict keys (tuples) are resolved too.
    keyed_in = [{(res4, ()): [res0, (err_a, hole), ()]}]
    keyed_out = [{(4, ()): [u'r0', [err_a, hole], []]}]
    keyed_flat = (4, u'r0', err_a, hole)

    return (
        ([], ([], ())),
        (1, (1, (1,))),
        (u'abc', (u'abc', (u'abc',))),
        (res0, (u'r0', (u'r0',))),
        (nested_in, (nested_out, nested_flat)),
        (keyed_in, (keyed_out, keyed_flat)),
    )
Esempio n. 2
0
 def test_first_mixed(self):
     """Set up one proxy of each kind for exercising first().

     NOTE(review): this snippet appears truncated -- it builds the
     proxies but contains no assertion on first(); confirm against the
     original test suite.
     """
     from flowy import first
     from flowy.result import result, error, timeout, placeholder
     r = result(1, 1)  # ready result: value 1, order 1
     t = timeout(2)  # timed-out task, order 2
     e = error('err!', 3)  # failed task, order 3
     p = placeholder()  # still-running task
Esempio n. 3
0
 def test_first_mixed(self):
     """Set up one proxy of each kind for exercising first().

     NOTE(review): this snippet appears truncated -- it builds the
     proxies but contains no assertion on first(); confirm against the
     original test suite.
     """
     from flowy import first
     from flowy.result import result, error, timeout, placeholder
     r = result(1, 1)  # ready result: value 1, order 1
     t = timeout(2)  # timed-out task, order 2
     e = error('err!', 3)  # failed task, order 3
     p = placeholder()  # still-running task
Esempio n. 4
0
 def test_first_results(self):
     """first() must return the proxy that finished earliest."""
     from flowy import first
     from flowy.result import result, error, timeout, placeholder
     r = result(1, 1)  # ready result, order 1 -- the earliest finisher
     t = timeout(2)
     e = error('err!', 3)
     p = placeholder()
     # assertEquals is a deprecated unittest alias; use assertEqual.
     self.assertEqual(first([e, p, r, t]).__factory__, r.__factory__)
Esempio n. 5
0
 def test_first_results(self):
     """first() must return the proxy that finished earliest."""
     from flowy import first
     from flowy.result import result, error, timeout, placeholder
     r = result(1, 1)  # ready result, order 1 -- the earliest finisher
     t = timeout(2)
     e = error('err!', 3)
     p = placeholder()
     # assertEquals is a deprecated unittest alias; use assertEqual.
     self.assertEqual(first([e, p, r, t]).__factory__, r.__factory__)
Esempio n. 6
0
def parallel_reduce(f, iterable, initializer=sentinel):
    """Like reduce() but optimized to maximize parallel execution.

    The reduce function must be associative and commutative.

    The reduction will start as soon as two results are available, regardless
    of their "position". For example, the following reduction is possible:

     5 ----1-----|
    15           --------------4----------|
    15           |                        -------------12|
    15           |                        |              -------------17|
  R 15           |                        |              |              -------------21
    15 ----------|---2-----|              |              |              |
    15           |         --------------8|              |              |
    10 ---------3|         |                             |              |
    60 --------------------|-----------------------------|--------4-----|
    50 --------------------|----------------------------5|
    20 -------------------6|

    The iterable must have at least one element, otherwise a ValueError will be
    raised.

    The improvement over the built-in reduce() is obtained by starting the
    reduction as soon as any two results are available. The number of reduce
    operations is always constant and equal to len(iterable) - 1 regardless of
    what the reduction graph looks like.
    """
    if initializer is not sentinel:
        iterable = itertools.chain([initializer], iterable)
    # Split plain values from result proxies (asynchronous task results).
    results, non_results = [], []
    for x in iterable:
        if is_result_proxy(x):
            results.append(x)
        else:
            non_results.append(x)
    # Fold the plain values pairwise; an odd leftover element becomes the
    # remainder and is merged in later.
    it = iter(non_results)
    remainder = sentinel
    for x in it:
        try:
            y = next(it)
        except StopIteration:
            remainder = x
            if not results:  # len(iterable) == 1
                # Wrap the value in a result for uniform interface
                return result(x, -1)
            break
        # Kept outside the try so a StopIteration escaping f() is not
        # silently swallowed by the pairing logic above.
        results.append(f(x, y))
    if not results:  # len(iterable) == 0
        raise ValueError(
            'parallel_reduce() of empty sequence with no initial value')
    if is_result_proxy(results[0]):
        # Heapify (key, proxy) pairs ordered by __factory__ so the recursive
        # reduction can always pop the smallest two.
        results = [(r.__factory__, r) for r in results]
        heapq.heapify(results)
        return _parallel_reduce_recurse(f, results, remainder)
    else:
        # Looks like we don't use a task for reduction, fallback on reduce
        return reduce(f, results)
Esempio n. 7
0
def parallel_reduce(f, iterable, initializer=sentinel):
    """Like reduce() but optimized to maximize parallel execution.

    The reduce function must be associative and commutative.

    The reduction will start as soon as two results are available, regardless
    of their "position". For example, the following reduction is possible:

     5 ----1-----|
    15           --------------4----------|
    15           |                        -------------12|
    15           |                        |              -------------17|
  R 15           |                        |              |              -------------21
    15 ----------|---2-----|              |              |              |
    15           |         --------------8|              |              |
    10 ---------3|         |                             |              |
    60 --------------------|-----------------------------|--------4-----|
    50 --------------------|----------------------------5|
    20 -------------------6|

    The iterable must have at least one element, otherwise a ValueError will be
    raised.

    The improvement over the built-in reduce() is obtained by starting the
    reduction as soon as any two results are available. The number of reduce
    operations is always constant and equal to len(iterable) - 1 regardless of
    what the reduction graph looks like.
    """
    if initializer is not sentinel:
        iterable = itertools.chain([initializer], iterable)
    # Split plain values from result proxies (asynchronous task results).
    results, non_results = [], []
    for x in iterable:
        if is_result_proxy(x):
            results.append(x)
        else:
            non_results.append(x)
    # Fold the plain values pairwise; an odd leftover element becomes the
    # remainder and is merged in later.
    it = iter(non_results)
    remainder = sentinel
    for x in it:
        try:
            y = next(it)
        except StopIteration:
            remainder = x
            if not results:  # len(iterable) == 1
                # Wrap the value in a result for uniform interface
                return result(x, -1)
            break
        # Kept outside the try so a StopIteration escaping f() is not
        # silently swallowed by the pairing logic above.
        results.append(f(x, y))
    if not results:  # len(iterable) == 0
        raise ValueError(
            'parallel_reduce() of empty sequence with no initial value')
    if is_result_proxy(results[0]):
        # Heapify (key, proxy) pairs ordered by __factory__ so the recursive
        # reduction can always pop the smallest two.
        results = [(r.__factory__, r) for r in results]
        heapq.heapify(results)
        return _parallel_reduce_recurse(f, results, remainder)
    else:
        # Looks like we don't use a task for reduction, fallback on reduce
        return reduce(f, results)
Esempio n. 8
0
 def test_results(self):
     """finish_order() must yield proxies by finish order, placeholder last."""
     from flowy import finish_order
     from flowy.result import result, error, timeout, placeholder
     r = result(1, 1)
     t = timeout(2)
     e = error('err!', 3)
     p = placeholder()
     fo = list(finish_order([e, p, r, t]))
     # Expected: result (order 1), timeout (2), error (3), then placeholder.
     # assertEquals is a deprecated unittest alias; use assertEqual.
     self.assertEqual(fo[0].__factory__, r.__factory__)
     self.assertEqual(fo[1].__factory__, t.__factory__)
     self.assertEqual(fo[2].__factory__, e.__factory__)
     self.assertEqual(fo[3].__factory__, p.__factory__)
Esempio n. 9
0
 def test_results(self):
     """finish_order() must yield proxies by finish order, placeholder last."""
     from flowy import finish_order
     from flowy.result import result, error, timeout, placeholder
     r = result(1, 1)
     t = timeout(2)
     e = error('err!', 3)
     p = placeholder()
     fo = list(finish_order([e, p, r, t]))
     # Expected: result (order 1), timeout (2), error (3), then placeholder.
     # assertEquals is a deprecated unittest alias; use assertEqual.
     self.assertEqual(fo[0].__factory__, r.__factory__)
     self.assertEqual(fo[1].__factory__, t.__factory__)
     self.assertEqual(fo[2].__factory__, e.__factory__)
     self.assertEqual(fo[3].__factory__, p.__factory__)
Esempio n. 10
0
 def test_mixed(self):
     """finish_order() must pass plain values through first, then proxies."""
     from flowy import finish_order
     from flowy.result import result, error, timeout, placeholder
     r = result(1, 1)
     t = timeout(2)
     e = error('err!', 3)
     p = placeholder()
     fo = list(finish_order([1, e, 2, p, 3, r, t]))
     # Plain values come out first, in their original order.
     # assertEquals is a deprecated unittest alias; use assertEqual.
     self.assertEqual(fo[:3], [1, 2, 3])
     # Proxies follow by finish order: result (1), timeout (2), error (3),
     # and finally the placeholder.
     self.assertEqual(fo[3].__factory__, r.__factory__)
     self.assertEqual(fo[4].__factory__, t.__factory__)
     self.assertEqual(fo[5].__factory__, e.__factory__)
     self.assertEqual(fo[6].__factory__, p.__factory__)
Esempio n. 11
0
def make_traverse_cases():
    """Return (data, (traversed, flattened)) fixtures for traverse tests."""
    # pytest attempts to look for tests derefing the results, causing trouble
    from flowy.result import error, placeholder, result

    r0 = result(u'r0', 0)
    e1 = error('err1', 1)
    e2 = error('err2', 2)
    e3 = error('err3', 3)
    r4 = result(4, 4)
    ph = placeholder()

    cases = []
    # Scalars and empty containers pass through unchanged.
    cases.append(([], ([], tuple())))
    cases.append((1, (1, (1,))))
    cases.append((u'abc', (u'abc', (u'abc',))))
    # A bare result proxy is replaced by its value.
    cases.append((r0, (u'r0', (u'r0',))))
    # A deeply nested mix of proxies, containers, and plain values.
    cases.append((
        [e1, [e2, e3], (r4, r0), [1, 2, [3]], {r4: ['xyz', ph]}],
        ([e1, [e2, e3], [4, u'r0'], [1, 2, [3]], {4: ['xyz', ph]}],
         (e1, e2, e3, 4, u'r0', 1, 2, 3, 4, 'xyz', ph)),
    ))
    # Proxies inside dict keys (tuples) are resolved as well.
    cases.append((
        [{(r4, tuple()): [r0, (e1, ph), tuple()]}],
        ([{(4, tuple()): [u'r0', [e1, ph], []]}], (4, u'r0', e1, ph)),
    ))
    return tuple(cases)
Esempio n. 12
0
 def test_mixed(self):
     """finish_order() must pass plain values through first, then proxies."""
     from flowy import finish_order
     from flowy.result import result, error, timeout, placeholder
     r = result(1, 1)
     t = timeout(2)
     e = error('err!', 3)
     p = placeholder()
     fo = list(finish_order([1, e, 2, p, 3, r, t]))
     # Plain values come out first, in their original order.
     # assertEquals is a deprecated unittest alias; use assertEqual.
     self.assertEqual(fo[:3], [1, 2, 3])
     # Proxies follow by finish order: result (1), timeout (2), error (3),
     # and finally the placeholder.
     self.assertEqual(fo[3].__factory__, r.__factory__)
     self.assertEqual(fo[4].__factory__, t.__factory__)
     self.assertEqual(fo[5].__factory__, e.__factory__)
     self.assertEqual(fo[6].__factory__, p.__factory__)
Esempio n. 13
0
def make_err_and_ph_cases():
    """Build ((accumulator, value), expected) cases for err/placeholder folds.

    The accumulator is an (error, saw_placeholder) pair: the error with the
    lowest order is kept, and encountering a placeholder sets the flag.
    """
    from flowy.result import error, placeholder

    # The unused r0 result (and the unused `result` import) were removed.
    e1 = error('err1', 1)
    e2 = error('err2', 2)
    ph = placeholder()

    return (
        (((None, False), 1), (None, False)),
        (((None, False), e1), (e1, False)),
        (((None, False), ph), (None, True)),
        (((e1, False), e2), (e1, False)),
        (((e2, False), e1), (e1, False)),
        (((e1, False), ph), (e1, True)),
    )
Esempio n. 14
0
def make_collect_cases():
    """Build ((accumulator, value), expected_accumulator) fixture cases."""
    from flowy.result import error, placeholder, result

    ok = result(u'r0', 0)
    early_err = error('err1', 1)
    late_err = error('err2', 2)
    pending = placeholder()

    plain_value = (((None, None), 1), (None, None))
    skip_placeholder = (((None, None), pending), (None, None))
    first_error = (((None, None), early_err), (early_err, None))
    other_error = (((None, None), late_err), (late_err, None))
    earliest_error_wins = (((late_err, None), early_err), (early_err, None))
    collected_result = (((None, None), ok), (None, [ok]))

    return (plain_value, skip_placeholder, first_error, other_error,
            earliest_error_wins, collected_result)
Esempio n. 15
0
def make_err_and_ph_cases():
    """Build ((accumulator, value), expected) cases for err/placeholder folds.

    The accumulator is an (error, saw_placeholder) pair: the error with the
    lowest order is kept, and encountering a placeholder sets the flag.
    """
    from flowy.result import error, placeholder

    # The unused r0 result (and the unused `result` import) were removed.
    e1 = error('err1', 1)
    e2 = error('err2', 2)
    ph = placeholder()

    return (
        (((None, False), 1), (None, False)),
        (((None, False), e1), (e1, False)),
        (((None, False), ph), (None, True)),
        (((e1, False), e2), (e1, False)),
        (((e2, False), e1), (e1, False)),
        (((e1, False), ph), (e1, True)),
    )
Esempio n. 16
0
def make_collect_cases():
    """Fixture cases for folding ((err, results), value) -> (err, results)."""
    from flowy.result import error, placeholder, result

    r0 = result(u'r0', 0)
    e1 = error('err1', 1)
    e2 = error('err2', 2)
    ph = placeholder()

    empty = (None, None)
    return (
        ((empty, 1), empty),              # plain values are ignored
        ((empty, ph), empty),             # placeholders are ignored
        ((empty, e1), (e1, None)),        # an error is recorded
        ((empty, e2), (e2, None)),
        (((e2, None), e1), (e1, None)),   # earlier error replaces a later one
        ((empty, r0), (None, [r0])),      # results are collected in a list
    )
Esempio n. 17
0
    def __call__(self, *args, **kwargs):
        """Consult the execution history for results or schedule a new task.

        This method gets called from the user workflow code.
        When calling it, the task it refers to can be in one of the following
        states: RUNNING, READY, FAILED, TIMEDOUT or NOTSCHEDULED.

        * If the task is RUNNING this returns a Placeholder. The Placeholder
          interrupts the workflow execution if its result is accessed by
          raising a SuspendTask exception.
        * If the task is READY this returns a Result object. Calling the result
          method on this object will just return the final value the task
          produced.
        * If the task is FAILED this returns an Error object. Calling the
          result method on this object will raise a TaskError exception
          containing the error message set by the task.
        * In case of a TIMEOUT this returns a Timeout object. Calling the
          result method on this object will raise TaskTimedout exception, a
          subclass of TaskError.
        * If the task was NOTSCHEDULED yet:
            * If any errors in arguments, propagate the error by returning
              another error.
            * If any placeholders in arguments, don't do anything because there
              are unresolved dependencies.
            * Finally, if all the arguments look OK, schedule it for execution.
        """
        task_exec_history = self.task_exec_history
        # Each invocation gets its own monotonically increasing call number,
        # used to key lookups in the execution history.
        call_number = self.call_number
        self.call_number += 1
        # Default outcome: a Placeholder (suspends the workflow on access).
        r = placeholder()
        # NOTE(review): if self.retry were empty, the else clause below would
        # read retry_number before assignment -- presumably self.retry always
        # has at least one entry; confirm in the constructor.
        for retry_number, delay in enumerate(self.retry):
            if task_exec_history.is_timeout(call_number, retry_number):
                # This attempt timed out; fall through to the next retry.
                continue
            if task_exec_history.is_running(call_number, retry_number):
                break  # result = Placeholder
            if task_exec_history.has_result(call_number, retry_number):
                value = task_exec_history.result(call_number, retry_number)
                order = task_exec_history.order(call_number, retry_number)
                try:
                    value = self.deserialize_result(value)
                except Exception as e:
                    logger.exception('Error while deserializing the activity result:')
                    # A deserialization failure fails the whole decision.
                    self.task_decision.fail(e)
                    break  # result = Placeholder
                r = result(value, order)
                break
            if task_exec_history.is_error(call_number, retry_number):
                err = task_exec_history.error(call_number, retry_number)
                order = task_exec_history.order(call_number, retry_number)
                r = error(err, order)
                break
            # NOTSCHEDULED: inspect the arguments before scheduling.
            traversed_args, (err, placeholders) = traverse_data([args, kwargs])
            if err:
                # Propagate an error found in the arguments.
                r = copy_result_proxy(err)
                break
            if placeholders:
                # Unresolved dependencies; leave the task unscheduled.
                break  # result = Placeholder
            t_args, t_kwargs = traversed_args
            try:
                input_data = self.serialize_input(*t_args, **t_kwargs)
            except Exception as e:
                logger.exception('Error while serializing the task input:')
                self.task_decision.fail(e)
                break  # result = Placeholder
            self.task_decision.schedule(call_number, retry_number, delay, input_data)
            break  # result = Placeholder
        else:
            # No retries left, it must be a timeout
            order = task_exec_history.order(call_number, retry_number)
            r = timeout(order)
        return r
Esempio n. 18
0
    def __call__(self, *args, **kwargs):
        """Consult the execution history for results or schedule a new task.

        This method gets called from the user workflow code.
        When calling it, the task it refers to can be in one of the following
        states: RUNNING, READY, FAILED, TIMEDOUT or NOTSCHEDULED.

        * If the task is RUNNING this returns a Placeholder. The Placeholder
          interrupts the workflow execution if its result is accessed by
          raising a SuspendTask exception.
        * If the task is READY this returns a Result object. Calling the result
          method on this object will just return the final value the task
          produced.
        * If the task is FAILED this returns an Error object. Calling the
          result method on this object will raise a TaskError exception
          containing the error message set by the task.
        * In case of a TIMEOUT this returns a Timeout object. Calling the
          result method on this object will raise TaskTimedout exception, a
          subclass of TaskError.
        * If the task was NOTSCHEDULED yet:
            * If any errors in arguments, propagate the error by returning
              another error.
            * If any placeholders in arguments, don't do anything because there
              are unresolved dependencies.
            * Finally, if all the arguments look OK, schedule it for execution.
        """
        task_exec_history = self.task_exec_history
        # Each invocation gets its own monotonically increasing call number,
        # used to key lookups in the execution history.
        call_number = self.call_number
        self.call_number += 1
        # Default outcome: a Placeholder (suspends the workflow on access).
        r = placeholder()
        # NOTE(review): if self.retry were empty, the else clause below would
        # read retry_number before assignment -- presumably self.retry always
        # has at least one entry; confirm in the constructor.
        for retry_number, delay in enumerate(self.retry):
            if task_exec_history.is_timeout(call_number, retry_number):
                # This attempt timed out; fall through to the next retry.
                continue
            if task_exec_history.is_running(call_number, retry_number):
                break  # result = Placeholder
            if task_exec_history.has_result(call_number, retry_number):
                value = task_exec_history.result(call_number, retry_number)
                order = task_exec_history.order(call_number, retry_number)
                try:
                    value = self.deserialize_result(value)
                except Exception as e:
                    logger.exception(
                        'Error while deserializing the activity result:')
                    # A deserialization failure fails the whole decision.
                    self.task_decision.fail(e)
                    break  # result = Placeholder
                r = result(value, order)
                break
            if task_exec_history.is_error(call_number, retry_number):
                err = task_exec_history.error(call_number, retry_number)
                order = task_exec_history.order(call_number, retry_number)
                r = error(err, order)
                break
            # NOTSCHEDULED: inspect the arguments before scheduling.
            traversed_args, (err, placeholders) = traverse_data([args, kwargs])
            if err:
                # Propagate an error found in the arguments.
                r = copy_result_proxy(err)
                break
            if placeholders:
                # Unresolved dependencies; leave the task unscheduled.
                break  # result = Placeholder
            t_args, t_kwargs = traversed_args
            try:
                input_data = self.serialize_input(*t_args, **t_kwargs)
            except Exception as e:
                logger.exception('Error while serializing the task input:')
                self.task_decision.fail(e)
                break  # result = Placeholder
            self.task_decision.schedule(call_number, retry_number, delay,
                                        input_data)
            break  # result = Placeholder
        else:
            # No retries left, it must be a timeout
            order = task_exec_history.order(call_number, retry_number)
            r = timeout(order)
        return r