def __call__(self, *args, **kwargs):
    node_id = "%s-%s" % (self.trace_name, self.call_number)
    r = super(TracingProxy, self).__call__(*args, **kwargs)
    assert is_result_proxy(r)
    factory = r.__factory__
    factory.node_id = node_id
    deps = []
    deps_ids = set()
    for a in args:
        if is_result_proxy(a):
            if id(a) not in deps_ids:
                deps.append(a)
                deps_ids.add(id(a))
    for k in kwargs.values():
        if is_result_proxy(k):
            if id(k) not in deps_ids:
                deps.append(k)
                deps_ids.add(id(k))
    errors, placeholders = scan_args(args, kwargs)
    if errors:
        self.tracer.schedule_activity(node_id, self.trace_name)
        self.tracer.flush_scheduled()
        error_factory = first(errors).__factory__
        self.tracer.error(node_id, str(error_factory.value))
    for dep in deps:
        self.tracer.add_dependency(dep.__factory__.node_id, node_id)
    return r

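# Illustrative sketch (not part of the library): the two dependency
# collection loops above are equivalent to deduplicating result proxies by
# identity while preserving the order in which they appear in args and then
# in kwargs. The helper name below is hypothetical.
def _unique_result_proxies(args, kwargs):
    seen, deps = set(), []
    for value in list(args) + list(kwargs.values()):
        if is_result_proxy(value) and id(value) not in seen:
            seen.add(id(value))
            deps.append(value)
    return deps
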
def check_err_and_placeholders(result, value):
    err, placeholders = result
    try:
        wait(value)
    except TaskError:
        if err is None:
            err = value
        else:
            err = first(err, value)
    except SuspendTask:
        placeholders = True
    return err, placeholders

def collect_err_and_results(result, value):
    err, results = result
    if not is_result_proxy(value):
        return result
    try:
        wait(value)
    except TaskError:
        if err is None:
            err = value
        else:
            err = first(err, value)
    except SuspendTask:
        pass
    else:
        if results is None:
            results = []
        results.append(value)
    return err, results

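# Illustrative only (assumption, not taken from the library): both reducer
# functions above are shaped for functools.reduce, folding an accumulator
# tuple over a flat sequence of task arguments. The helper names and the
# flat_args parameter below are hypothetical.
from functools import reduce

def scan_for_error_and_placeholders(flat_args):
    # Returns (first_error_proxy_or_None, placeholders_seen_bool).
    return reduce(check_err_and_placeholders, flat_args, (None, False))

def scan_for_error_and_results(flat_args):
    # Returns (first_error_proxy_or_None, list_of_resolved_proxies_or_None).
    return reduce(collect_err_and_results, flat_args, (None, None))
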
def __call__(self, *args, **kwargs): """Consult the execution history for results or schedule a new task. This is method gets called from the user workflow code. When calling it, the task it refers to can be in one of the following states: RUNNING, READY, FAILED, TIMEDOUT or NOTSCHEDULED. * If the task is RUNNING this returns a Placeholder. The Placeholder interrupts the workflow execution if its result is accessed by raising a SuspendTask exception. * If the task is READY this returns a Result object. Calling the result method on this object will just return the final value the task produced. * If the task is FAILED this returns an Error object. Calling the result method on this object will raise a TaskError exception containing the error message set by the task. * In case of a TIMEOUT this returns an Timeout object. Calling the result method on this object will raise TaskTimedout exception, a subclass of TaskError. * If the task was NOTSCHEDULED yet: * If any errors in arguments, propagate the error by returning another error. * If any placeholders in arguments, don't do anything because there are unresolved dependencies. * Finally, if all the arguments look OK, schedule it for execution. """ task_exec_history = self.task_exec_history call_number = self.call_number self.call_number += 1 r = placeholder() for retry_number, delay in enumerate(self.retry): if task_exec_history.is_timeout(call_number, retry_number): continue if task_exec_history.is_running(call_number, retry_number): break # result = Placehloder if task_exec_history.has_result(call_number, retry_number): value = task_exec_history.result(call_number, retry_number) order = task_exec_history.order(call_number, retry_number) try: value = self.deserialize_result(value) except Exception as e: logger.exception('Error while deserializing the activity result:') self.task_decision.fail(e) break # result = Placeholder r = result(value, order) break if task_exec_history.is_error(call_number, retry_number): err = task_exec_history.error(call_number, retry_number) order = task_exec_history.order(call_number, retry_number) r = error(err, order) break errors, placeholders = scan_args(args, kwargs) if errors: r = copy_result_proxy(first(errors)) break if placeholders: break # result = Placeholder try: input_data = self.serialize_input(*args, **kwargs) except Exception as e: logger.exception('Error while serializing the task input:') self.task_decision.fail(e) break # result = Placeholder self.task_decision.schedule(call_number, retry_number, delay, input_data) break # result = Placeholder else: # No retries left, it must be a timeout order = task_exec_history.order(call_number, retry_number) r = timeout(order) return r