def schedule_task(self, a_task, task_list=None):
    """
    Let a task schedule itself.

    If the resulting decisions would exceed SWF limits (request size or
    decision count), a wake-up timer is requested and ExecutionBlocked is
    raised so the decider responds now and resumes immediately after.

    :param a_task: task to schedule
    :type a_task: ActivityTask | WorkflowTask | SignalTask | MarkerTask
    :param task_list: optional SWF task list to schedule on
    :type task_list: Optional[str]
    :raise: exceptions.ExecutionBlocked if too many decisions waiting
    """
    if a_task.idempotent:
        # Idempotent tasks are only submitted once per decision round.
        task_identifier = (type(a_task), self.domain, a_task.id)
        if task_identifier in self._idempotent_tasks_to_submit:
            # Lazy %-args: the message is only formatted if DEBUG is enabled.
            logger.debug('Not resubmitting task %s', a_task.name)
            return
        self._idempotent_tasks_to_submit.add(task_identifier)

    # NB: ``decisions`` contains a single decision.
    decisions = a_task.schedule(self.domain, task_list, priority=self.current_priority)

    # Ready to schedule
    if isinstance(a_task, ActivityTask):
        self._open_activity_count += 1
    elif isinstance(a_task, MarkerTask):
        self._append_timer = True  # markers don't generate decisions, so force a wake-up timer

    # Check if we won't violate the 1MB limit on API requests ; if so, do NOT
    # schedule the requested task and block execution instead, with a timer
    # to wake up the workflow immediately after completing these decisions.
    # See: http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-limits.html
    request_size = len(json.dumps(self._decisions + decisions))
    # We keep a 5kB of error margin for headers, json structure, and the
    # timer decision, and 32kB for the context, even if we don't use it now.
    if request_size > constants.MAX_REQUEST_SIZE - 5000 - 32000:
        # TODO: at this point we may check that self._decisions is not empty
        # If it's the case, it means that a single decision was weighting
        # more than 900kB, so we have bigger problems.
        self._append_timer = True
        raise exceptions.ExecutionBlocked()

    self._decisions.extend(decisions)

    # Check if we won't exceed max decisions -1
    # TODO: if we had exactly MAX_DECISIONS - 1 to take, this will wake up
    # the workflow for no reason. Evaluate if we can do better.
    if len(self._decisions) == constants.MAX_DECISIONS - 1:
        # We add a timer to wake up the workflow immediately after
        # completing these decisions.
        self._append_timer = True
        raise exceptions.ExecutionBlocked()
def schedule_task(self, a_task, task_list=None):
    """
    Let a task schedule itself on the given task list.

    :param a_task: task to schedule
    :type a_task: ActivityTask
    :param task_list: optional SWF task list name
    :type task_list: Optional[str]
    :return:
    :rtype:
    :raise: exceptions.ExecutionBlocked if too many decisions waiting
    """
    logger.debug('executor is scheduling task {} on task_list {}'.format(
        a_task.name,
        task_list,
    ))

    # The task produces a single scheduling decision.
    new_decisions = a_task.schedule(self.domain, task_list)
    self._decisions.extend(new_decisions)
    self._open_activity_count += 1

    # One slot short of the SWF maximum: reserve it for a zero-delay
    # wake-up timer so the workflow resumes right after these decisions.
    if len(self._decisions) != constants.MAX_DECISIONS - 1:
        return

    resume_timer = swf.models.decision.TimerDecision(
        'start',
        id='resume-after-{}'.format(a_task.id),
        start_to_fire_timeout='0',
    )
    self._decisions.append(resume_timer)
    raise exceptions.ExecutionBlocked()
def propagate_signals(self):
    """
    Send every signal we got to our parent and children.

    Workflows already present in ``history.signaled_workflows`` for a given
    signal name are skipped, as is the original sender of the signal.

    :raise: exceptions.ExecutionBlocked if any signal task was scheduled,
        so the decisions are flushed before execution continues.
    """
    history = self._history
    if not history.signals:
        return

    # Collect (workflow_id, run_id) pairs for the parent (if any) and all
    # started children: the candidate recipients of propagated signals.
    known_workflows_ids = []
    if self._run_context.get('parent_workflow_id'):
        known_workflows_ids.append(
            (self._run_context['parent_workflow_id'], self._run_context['parent_run_id'])
        )
    known_workflows_ids.extend(
        (w['workflow_id'], w['run_id'])
        for w in history.child_workflows.values()
        if w['state'] == 'started'
    )
    known_workflows_ids = frozenset(known_workflows_ids)

    signals_scheduled = False

    for signal in history.signals.values():
        # Renamed from ``input`` to avoid shadowing the builtin.
        signal_input = signal['input']
        if not isinstance(signal_input, dict):
            # foreign signal: don't try processing it
            continue
        propagate = signal_input.get('__propagate', False)
        if not propagate:
            continue

        name = signal['name']
        args = signal_input.get('args', ())
        kwargs = signal_input.get('kwargs', {})
        sender = (
            signal['external_workflow_id'],
            signal['external_run_id']
        )
        # Workflows that were already sent this signal.
        signaled_workflows_ids = {
            (w['workflow_id'], w['run_id'])
            for w in history.signaled_workflows[name]
        }
        not_signaled_workflows_ids = list(
            known_workflows_ids - signaled_workflows_ids - {sender}
        )

        extra_input = {'__propagate': propagate}

        for workflow_id, run_id in not_signaled_workflows_ids:
            self.schedule_task(SignalTask(
                name, workflow_id, run_id, None, extra_input, *args, **kwargs
            ))
            signals_scheduled = True

    if signals_scheduled:
        raise exceptions.ExecutionBlocked()
def fail(self, reason, details=None):
    """
    Fail the workflow execution: run the on_failure callback, then record
    a fail decision and block execution so the decision is sent to SWF.

    :param reason: failure reason, embedded in the decision's reason field
    :param details: optional failure details
    :raise: exceptions.ExecutionBlocked always
    """
    self.on_failure(reason, details)

    fail_decision = swf.models.decision.WorkflowExecutionDecision()
    failure_reason = 'Workflow execution failed: {}'.format(reason)
    fail_decision.fail(
        reason=failure_reason,
        details=details,
    )
    self._decisions_and_context.append_decision(fail_decision)

    raise exceptions.ExecutionBlocked('workflow execution failed')
def fail(self, reason, details=None):
    """
    Fail the workflow execution: run the on_failure callback, then append
    a fail decision (reason/details truncated via swf.format) and block.

    :param reason: failure reason, embedded in the decision's reason field
    :param details: optional failure details
    :raise: exceptions.ExecutionBlocked always
    """
    self.on_failure(reason, details)

    fail_decision = swf.models.decision.WorkflowExecutionDecision()
    failure_reason = swf.format.reason(
        'Workflow execution failed: {}'.format(reason))
    fail_decision.fail(
        reason=failure_reason,
        details=swf.format.details(details),
    )
    self._decisions.append(fail_decision)

    raise exceptions.ExecutionBlocked('workflow execution failed')
def schedule_task(self, a_task, task_list=None):
    """
    Let a task schedule itself on the given task list.

    :param a_task: task to schedule (must expose ``name``, ``id`` and
        ``schedule()``)
    :param task_list: optional SWF task list name
    :type task_list: Optional[str]
    :raise: exceptions.ExecutionBlocked if too many decisions waiting
    """
    logger.debug('executor is scheduling task {} on task_list {}'.format(
        a_task.name,
        task_list,
    ))

    # The task yields exactly one scheduling decision.
    self._decisions.extend(a_task.schedule(self.domain, task_list))
    self._open_activity_count += 1

    # Keep one slot free under the SWF decision cap: when we reach it,
    # add a zero-delay wake-up timer and stop taking further decisions.
    if len(self._decisions) == constants.MAX_DECISIONS - 1:
        wake_up = swf.models.decision.TimerDecision(
            'start',
            id='resume-after-{}'.format(a_task.id),
            start_to_fire_timeout='0',
        )
        self._decisions.append(wake_up)
        raise exceptions.ExecutionBlocked()