def _handle_remote_workflow(self):
    """Supervise a remote workflow execution and report its outcome.

    The workflow itself runs on a child thread; this (parent) thread
    relays the child's result or error, while also polling the REST
    service for cancel / force-cancel requests on the execution.

    Returns the workflow result, or api.EXECUTION_CANCELLED_RESULT when
    the execution was cancelled.  Re-raises any workflow failure after
    reporting it.
    """
    rest = get_rest_client()
    amqp_client_utils.init_amqp_client()
    try:
        execution = rest.executions.get(self.ctx.execution_id,
                                        _include=['status'])
        # A cancel may have been requested before the workflow ever
        # started - honour it without spawning the child thread at all.
        if execution.status in (Execution.CANCELLING,
                                Execution.FORCE_CANCELLING):
            self._workflow_cancelled()
            return api.EXECUTION_CANCELLED_RESULT

        self._workflow_started()

        messages = Queue.Queue()
        child = AMQPWrappedThread(target=self._remote_workflow_child_thread,
                                  args=(messages,))
        child.start()

        # While the child thread executes the workflow, wait (in 5s
        # slices) for a message from it, interleaved with polling the
        # execution status for cancellation requests.
        outcome = None
        while True:
            payload = None
            try:
                payload = messages.get(timeout=5)
            except Queue.Empty:
                pass
            if payload is not None:
                if 'result' in payload:
                    # The child thread has terminated normally.
                    outcome = payload['result']
                    break
                # An error occurred in the child thread - surface it.
                failure = payload['error']
                raise exceptions.ProcessExecutionError(failure['message'],
                                                       failure['type'],
                                                       failure['traceback'])
            # No message yet - check for cancellation requests.
            execution = rest.executions.get(self.ctx.execution_id,
                                            _include=['status'])
            if execution.status == Execution.FORCE_CANCELLING:
                # Force-cancel stops supervising immediately.
                outcome = api.EXECUTION_CANCELLED_RESULT
                break
            elif execution.status == Execution.CANCELLING:
                # Signal the child thread.  It is up to the workflow
                # implementation to check this flag and act on it (by
                # raising an api.ExecutionCancelled error, or by
                # returning the deprecated
                # api.EXECUTION_CANCELLED_RESULT as its result).  We
                # then keep polling for messages from the child thread
                # or for a possible 'force-cancelling' request.
                api.cancel_request = True

        if outcome == api.EXECUTION_CANCELLED_RESULT:
            self._workflow_cancelled()
        else:
            self._workflow_succeeded()
        return outcome
    except exceptions.ProcessExecutionError as e:
        # The child thread already produced a formatted traceback.
        self._workflow_failed(e, e.traceback)
        raise
    except BaseException as e:
        self._workflow_failed(e, traceback.format_exc())
        raise
    finally:
        amqp_client_utils.close_amqp_client()
def _handle_remote_workflow(self):
    """Supervise a remote workflow execution and report its outcome.

    The workflow itself runs on a child thread; this (parent) thread
    relays the child's result or error, while also polling the REST
    service for cancel / force-cancel / kill-cancel requests.

    Returns the workflow result, or api.EXECUTION_CANCELLED_RESULT when
    the execution was cancelled.  Re-raises any workflow failure after
    reporting it.
    """
    # Resolve the tenant the execution actually belongs to (it may
    # differ from the current tenant for shared/global resources).
    tenant = self.ctx._context['tenant'].get('original_name',
                                             self.ctx.tenant_name)
    rest = get_rest_client(tenant=tenant)
    execution = rest.executions.get(self.ctx.execution_id,
                                    _include=['status'])
    # An execution already in STARTED state means we are resuming it.
    if execution.status == Execution.STARTED:
        self.ctx.resume = True
    try:
        amqp_client_utils.init_events_publisher()
        try:
            self._workflow_started()
        except InvalidExecutionUpdateStatus:
            # The status transition was rejected - the execution was
            # cancelled before it could start.
            self._workflow_cancelled()
            return api.EXECUTION_CANCELLED_RESULT

        messages = Queue.Queue()
        child = AMQPWrappedThread(target=self._remote_workflow_child_thread,
                                  args=(messages, ),
                                  name='Workflow-Child')
        child.start()

        # While the child thread executes the workflow, wait (in 5s
        # slices) for a message from it, interleaved with polling the
        # execution status for cancellation requests.
        outcome = None
        while True:
            payload = None
            try:
                payload = messages.get(timeout=5)
            except Queue.Empty:
                pass
            if payload is not None:
                if 'result' in payload:
                    # The child thread has terminated normally.
                    outcome = payload['result']
                    break
                # An error occurred in the child thread - surface it.
                failure = payload['error']
                raise exceptions.ProcessExecutionError(failure['message'],
                                                       failure['type'],
                                                       failure['traceback'])

            # A very hacky way to solve an edge case when trying to
            # poll for the execution status while the DB is downgraded
            # during a snapshot restore: skip status polling entirely.
            if self.cloudify_context['workflow_id'] == 'restore_snapshot':
                continue

            # No message yet - check for cancellation requests.
            execution = rest.executions.get(self.ctx.execution_id,
                                            _include=['status'])
            if execution.status in (Execution.CANCELLING,
                                    Execution.FORCE_CANCELLING,
                                    Execution.KILL_CANCELLING):
                # Signal the child thread.  It is up to the workflow
                # implementation to check this flag and act on it (by
                # raising an api.ExecutionCancelled error, or by
                # returning the deprecated
                # api.EXECUTION_CANCELLED_RESULT as its result).
                api.cancel_request = True
            if execution.status == Execution.KILL_CANCELLING:
                # If a custom workflow function must attempt some
                # cleanup, it might attempt to catch SIGTERM and
                # confirm via this flag that it is being kill-cancelled.
                api.kill_request = True
            if execution.status in (Execution.FORCE_CANCELLING,
                                    Execution.KILL_CANCELLING):
                # force/kill-cancel additionally stops this loop
                # immediately
                outcome = api.EXECUTION_CANCELLED_RESULT
                break

        if outcome == api.EXECUTION_CANCELLED_RESULT:
            self._workflow_cancelled()
        else:
            self._workflow_succeeded()
        return outcome
    except exceptions.ProcessExecutionError as e:
        # The child thread already produced a formatted traceback.
        self._workflow_failed(e, e.traceback)
        raise
    except BaseException as e:
        self._workflow_failed(e, traceback.format_exc())
        raise
    finally:
        amqp_client_utils.close_amqp_client()
def _remote_workflow(ctx, func, args, kwargs):
    """Run workflow function ``func`` on a child thread and supervise it.

    The parent (this) thread polls the REST service for cancellation
    requests while waiting for the child thread to report a result or
    an error via ``child_queue``.  Execution status updates and workflow
    lifecycle events are emitted along the way.

    :param ctx: the workflow context (provides execution_id and the
        internal event monitor)
    :param func: the workflow function to execute
    :param args: positional arguments for ``func``
    :param kwargs: keyword arguments for ``func``
    :return: the workflow result, or api.EXECUTION_CANCELLED_RESULT if
        the execution was cancelled
    :raises RequestSystemExit: when the execution was force-cancelled
    :raises exceptions.ProcessExecutionError: when the workflow function
        raised an error on the child thread
    """
    def update_execution_cancelled():
        # Mark the execution CANCELLED and emit the matching event.
        update_execution_status(ctx.execution_id, Execution.CANCELLED)
        _send_workflow_cancelled_event(ctx)

    rest = get_rest_client()
    # parent_queue: parent -> child ('cancel' actions, exposed as api.queue)
    # child_queue:  child -> parent (final result or error payload)
    parent_queue, child_queue = (Queue.Queue(), Queue.Queue())
    try:
        if rest.executions.get(ctx.execution_id).status in \
                (Execution.CANCELLING, Execution.FORCE_CANCELLING):
            # execution has been requested to be cancelled before it
            # was even started
            update_execution_cancelled()
            return api.EXECUTION_CANCELLED_RESULT

        update_execution_status(ctx.execution_id, Execution.STARTED)
        _send_workflow_started_event(ctx)

        # the actual execution of the workflow will run in another
        # thread - this wrapper is the entry point for that
        # thread, and takes care of forwarding the result or error
        # back to the parent thread
        def child_wrapper():
            try:
                ctx.internal.start_event_monitor()
                workflow_result = _execute_workflow_function(
                    ctx, func, args, kwargs)
                child_queue.put({'result': workflow_result})
            except api.ExecutionCancelled:
                # The workflow honoured a cancel request by raising.
                child_queue.put({
                    'result': api.EXECUTION_CANCELLED_RESULT})
            except BaseException as workflow_ex:
                # Capture the full traceback as text so it survives
                # the thread boundary.
                tb = StringIO()
                traceback.print_exc(file=tb)
                err = {
                    'type': type(workflow_ex).__name__,
                    'message': str(workflow_ex),
                    'traceback': tb.getvalue()
                }
                child_queue.put({'error': err})
            finally:
                ctx.internal.stop_event_monitor()

        # Expose the parent->child queue so workflow code can poll it
        # (via the api module) for 'cancel' actions.
        api.queue = parent_queue

        # starting workflow execution on child thread
        t = Thread(target=child_wrapper)
        t.start()

        # while the child thread is executing the workflow,
        # the parent thread is polling for 'cancel' requests while
        # also waiting for messages from the child thread
        has_sent_cancelling_action = False
        result = None
        execution = None
        while True:
            # check if child thread sent a message
            try:
                data = child_queue.get(timeout=5)
                if 'result' in data:
                    # child thread has terminated
                    result = data['result']
                    break
                else:
                    # error occurred in child thread
                    error = data['error']
                    raise exceptions.ProcessExecutionError(error['message'],
                                                           error['type'],
                                                           error['traceback'])
            except Queue.Empty:
                pass
            # check for 'cancel' requests
            execution = rest.executions.get(ctx.execution_id)
            if execution.status == Execution.FORCE_CANCELLING:
                # Force-cancel stops the supervising loop immediately.
                result = api.EXECUTION_CANCELLED_RESULT
                break
            elif not has_sent_cancelling_action and \
                    execution.status == Execution.CANCELLING:
                # send a 'cancel' message to the child thread. It
                # is up to the workflow implementation to check for
                # this message and act accordingly (by stopping and
                # raising an api.ExecutionCancelled error, or by returning
                # the deprecated api.EXECUTION_CANCELLED_RESULT as result).
                # parent thread then goes back to polling for
                # messages from child process or possibly
                # 'force-cancelling' requests
                parent_queue.put({'action': 'cancel'})
                # Only send the cancel action once.
                has_sent_cancelling_action = True

        # updating execution status and sending events according to
        # how the execution ended
        if result == api.EXECUTION_CANCELLED_RESULT:
            update_execution_cancelled()
            if execution and execution.status == Execution.FORCE_CANCELLING:
                # TODO: kill worker externally
                raise RequestSystemExit()
        else:
            update_execution_status(ctx.execution_id, Execution.TERMINATED)
            _send_workflow_succeeded_event(ctx)
        return result
    except RequestSystemExit:
        # Already reported as cancelled above - just propagate.
        raise
    except BaseException as e:
        # A ProcessExecutionError carries the child thread's formatted
        # traceback; anything else is formatted from this thread.
        if isinstance(e, exceptions.ProcessExecutionError):
            error_traceback = e.traceback
        else:
            error = StringIO()
            traceback.print_exc(file=error)
            error_traceback = error.getvalue()
        update_execution_status(ctx.execution_id,
                                Execution.FAILED,
                                error_traceback)
        _send_workflow_failed_event(ctx, e, error_traceback)
        raise