def update_execution_cancelled(ctx):
    update_execution_status(ctx.execution_id, Execution.CANCELLED)
    send_workflow_event(
        ctx,
        event_type='workflow_cancelled',
        message="'{}' workflow execution cancelled"
                .format(ctx.workflow_id))

def cancel_workflow_task(self, execution_id, rest_token, tenant,
                         execution_token, rest_host):
    logger.info('Cancelling workflow {0}'.format(execution_id))

    class CancelCloudifyContext(object):
        """A CloudifyContext that has just enough data to cancel workflows"""
        def __init__(self):
            self.rest_host = rest_host
            self.tenant_name = tenant['name']
            self.rest_token = rest_token
            self.execution_token = execution_token
            # always bypass - this is a kill, as forceful as we can get
            self.bypass_maintenance = True

    with current_workflow_ctx.push(CancelCloudifyContext()):
        self._workflow_registry.cancel(execution_id)
        self._cancel_agent_operations(execution_id)
        try:
            update_execution_status(execution_id, Execution.CANCELLED)
        except InvalidExecutionUpdateStatus:
            # the workflow process might have cleaned up, and marked the
            # workflow failed or cancelled already
            logger.info('Failed to update execution status: {0}'.format(
                execution_id))

def wrapper(*args, **kwargs):
    ctx = _find_context_arg(args, kwargs,
                            _is_cloudify_workflow_context)
    if ctx is None:
        ctx = {}
    if not _is_cloudify_workflow_context(ctx):
        ctx = CloudifyWorkflowContext(ctx)
    kwargs['ctx'] = ctx
    rest = get_rest_client()
    parent_conn, child_conn = Pipe()
    try:
        if rest.executions.get(ctx.execution_id).status in \
                (Execution.CANCELLING, Execution.FORCE_CANCELLING):
            # execution has been requested to be cancelled before it
            # was even started
            update_execution_cancelled(ctx)
            return api.EXECUTION_CANCELLED_RESULT

        update_execution_status(ctx.execution_id, Execution.STARTED)
        send_workflow_event(ctx,
                            event_type='workflow_started',
                            message="Starting '{}' workflow execution"
                                    .format(ctx.workflow_id))

        # the actual execution of the workflow will run in another
        # process - this wrapper is the entry point for that
        # process, and takes care of forwarding the result or error
        # back to the parent process
        def child_wrapper():
            try:
                start_event_monitor(ctx)
                current_workflow_ctx.set(ctx, kwargs)
                result = func(*args, **kwargs)
                if not ctx.internal.graph_mode:
                    tasks = list(ctx.internal.task_graph.tasks_iter())
                    for task in tasks:
                        task.async_result.get()
                child_conn.send({'result': result})
            except api.ExecutionCancelled:
                child_conn.send({
                    'result': api.EXECUTION_CANCELLED_RESULT})
            except BaseException as e:
                tb = StringIO()
                traceback.print_exc(file=tb)
                err = {
                    'type': type(e).__name__,
                    'message': str(e),
                    'traceback': tb.getvalue()
                }
                child_conn.send({'error': err})
            finally:
                current_workflow_ctx.clear()
                child_conn.close()

def cancel_workflow_task(self, execution_id, rest_token, tenant,
                         execution_token):
    logger.info('Cancelling workflow {0}'.format(execution_id))

    class CancelCloudifyContext(object):
        """A CloudifyContext that has just enough data to cancel workflows"""
        def __init__(self):
            self.tenant = tenant
            self.tenant_name = tenant['name']
            self.rest_token = rest_token
            self.execution_token = execution_token

    with current_workflow_ctx.push(CancelCloudifyContext()):
        self._workflow_registry.cancel(execution_id)
        self._cancel_agent_operations(execution_id)
        try:
            update_execution_status(execution_id, Execution.CANCELLED)
        except InvalidExecutionUpdateStatus:
            # the workflow process might have cleaned up, and marked the
            # workflow failed or cancelled already
            logger.info('Failed to update execution status: {0}'
                        .format(execution_id))

def _update_execution_status(self, status, error=None):
    if self.ctx.local or not self.update_execution_status:
        return
    caught_error = None
    for _ in range(3):
        try:
            return update_execution_status(
                self.ctx.execution_id, status, error)
        except Exception as e:
            self.ctx.logger.exception(
                'Update execution status got unexpected rest error: %s', e)
            caught_error = e
            sleep(5)
    else:
        raise caught_error

def _update_execution_status(self, status, error=None):
    if self.ctx.local:
        return
    while True:
        try:
            return update_execution_status(
                self.ctx.execution_id, status, error)
        except InvalidExecutionUpdateStatus as exc:
            self.ctx.logger.exception(
                'update execution status is invalid: {0}'.format(exc))
            raise
        except Exception as exc:
            self.ctx.logger.exception(
                'update execution status got unexpected rest error: {0}'
                .format(exc))
            sleep(5)

def _update_execution_status(self, status, error=None):
    if self.ctx.local or not self.update_execution_status:
        return
    caught_error = None
    for _ in range(3):
        try:
            return update_execution_status(self.ctx.execution_id,
                                           status, error)
        except Exception as e:
            self.ctx.logger.exception(
                'Update execution status got unexpected rest error: %s', e)
            caught_error = e
            sleep(5)
    else:
        raise caught_error

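# The two retry variants above lean on Python's for/else: the else block
# runs only when the loop finishes without a break, i.e. when every attempt
# raised, so the last caught error is re-raised once the retries are
# exhausted. A minimal standalone sketch of that idiom (call_with_retries
# and flaky_call are illustrative names, not part of the cloudify source):
import time


def call_with_retries(flaky_call, attempts=3, delay=5):
    caught_error = None
    for _ in range(attempts):
        try:
            # a successful attempt returns immediately and skips the else
            return flaky_call()
        except Exception as e:
            caught_error = e
            time.sleep(delay)
    else:
        # reached only when all attempts failed
        raise caught_error
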
def _update_execution_status(self, status, error=None):
    if not self.ctx.local:
        update_execution_status(self.ctx.execution_id, status, error)

def remote(self):
    update_execution_status(
        workflow_ctx.execution_id, self._status)

def update_execution_cancelled():
    update_execution_status(ctx.execution_id, Execution.CANCELLED)
    _send_workflow_cancelled_event(ctx)

def _remote_workflow(ctx, func, args, kwargs):
    def update_execution_cancelled():
        update_execution_status(ctx.execution_id, Execution.CANCELLED)
        _send_workflow_cancelled_event(ctx)

    rest = get_rest_client()
    parent_queue, child_queue = (Queue.Queue(), Queue.Queue())
    try:
        if rest.executions.get(ctx.execution_id).status in \
                (Execution.CANCELLING, Execution.FORCE_CANCELLING):
            # execution has been requested to be cancelled before it
            # was even started
            update_execution_cancelled()
            return api.EXECUTION_CANCELLED_RESULT

        update_execution_status(ctx.execution_id, Execution.STARTED)
        _send_workflow_started_event(ctx)

        # the actual execution of the workflow will run in another
        # thread - this wrapper is the entry point for that
        # thread, and takes care of forwarding the result or error
        # back to the parent thread
        def child_wrapper():
            try:
                ctx.internal.start_event_monitor()
                workflow_result = _execute_workflow_function(
                    ctx, func, args, kwargs)
                child_queue.put({'result': workflow_result})
            except api.ExecutionCancelled:
                child_queue.put({
                    'result': api.EXECUTION_CANCELLED_RESULT})
            except BaseException as workflow_ex:
                tb = StringIO()
                traceback.print_exc(file=tb)
                err = {
                    'type': type(workflow_ex).__name__,
                    'message': str(workflow_ex),
                    'traceback': tb.getvalue()
                }
                child_queue.put({'error': err})
            finally:
                ctx.internal.stop_event_monitor()

        api.queue = parent_queue

        # starting workflow execution on child thread
        t = Thread(target=child_wrapper)
        t.start()

        # while the child thread is executing the workflow,
        # the parent thread is polling for 'cancel' requests while
        # also waiting for messages from the child thread
        has_sent_cancelling_action = False
        result = None
        execution = None
        while True:
            # check if child thread sent a message
            try:
                data = child_queue.get(timeout=5)
                if 'result' in data:
                    # child thread has terminated
                    result = data['result']
                    break
                else:
                    # error occurred in child thread
                    error = data['error']
                    raise exceptions.ProcessExecutionError(error['message'],
                                                           error['type'],
                                                           error['traceback'])
            except Queue.Empty:
                pass
            # check for 'cancel' requests
            execution = rest.executions.get(ctx.execution_id)
            if execution.status == Execution.FORCE_CANCELLING:
                result = api.EXECUTION_CANCELLED_RESULT
                break
            elif not has_sent_cancelling_action and \
                    execution.status == Execution.CANCELLING:
                # send a 'cancel' message to the child thread. It
                # is up to the workflow implementation to check for
                # this message and act accordingly (by stopping and
                # raising an api.ExecutionCancelled error, or by returning
                # the deprecated api.EXECUTION_CANCELLED_RESULT as result).
                # parent thread then goes back to polling for
                # messages from child process or possibly
                # 'force-cancelling' requests
                parent_queue.put({'action': 'cancel'})
                has_sent_cancelling_action = True

        # updating execution status and sending events according to
        # how the execution ended
        if result == api.EXECUTION_CANCELLED_RESULT:
            update_execution_cancelled()
            if execution and execution.status == Execution.FORCE_CANCELLING:
                # TODO: kill worker externally
                raise RequestSystemExit()
        else:
            update_execution_status(ctx.execution_id, Execution.TERMINATED)
            _send_workflow_succeeded_event(ctx)
        return result
    except RequestSystemExit:
        raise
    except BaseException as e:
        if isinstance(e, exceptions.ProcessExecutionError):
            error_traceback = e.traceback
        else:
            error = StringIO()
            traceback.print_exc(file=error)
            error_traceback = error.getvalue()
        update_execution_status(ctx.execution_id, Execution.FAILED,
                                error_traceback)
        _send_workflow_failed_event(ctx, e, error_traceback)
        raise

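# The polling loop above only *requests* cancellation by putting
# {'action': 'cancel'} on the queue it exposes as api.queue; the workflow
# body has to cooperate. A minimal sketch of a cooperating workflow,
# assuming the usual cloudify.workflows.api helpers (has_cancel_request,
# ExecutionCancelled) are available in this version of the library; the
# workflow name and operation name are illustrative only:
from cloudify.decorators import workflow
from cloudify.workflows import api


@workflow
def long_running(ctx, **kwargs):
    for node in ctx.nodes:
        # periodically check whether the parent thread asked us to stop
        if api.has_cancel_request():
            # raising ExecutionCancelled makes child_wrapper report
            # EXECUTION_CANCELLED_RESULT back through child_queue
            raise api.ExecutionCancelled()
        for instance in node.instances:
            instance.execute_operation('cloudify.interfaces.lifecycle.start')
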
def remote(self):
    update_execution_status(workflow_ctx.execution_id,
                            self.kwargs['status'])

def update_execution_status_task():
    update_execution_status(self.workflow_ctx.execution_id, new_status)

def _update_execution_status(self, status, error=None):
    if self.ctx.local or not self.update_execution_status:
        return
    return update_execution_status(self.ctx.execution_id, status, error)

def update_execution_status_task():
    update_execution_status(self.execution_id, new_status)

def workflow(func=None, **arguments):
    """
    Decorate workflow functions with this decorator.
    Internally, if celery is installed, will also wrap the function
    with a ``@celery.task`` decorator

    The ``ctx`` injected to the function arguments is of type
    ``cloudify.workflows.workflow_context.CloudifyWorkflowContext``

    Example::

        @workflow
        def reinstall(ctx, **kwargs):
            pass
    """
    if func is not None:
        def update_execution_cancelled(ctx):
            update_execution_status(ctx.execution_id, Execution.CANCELLED)
            send_workflow_event(
                ctx,
                event_type='workflow_cancelled',
                message="'{}' workflow execution cancelled"
                        .format(ctx.workflow_id))

        @_task
        @wraps(func)
        def wrapper(*args, **kwargs):
            ctx = _find_context_arg(args, kwargs,
                                    _is_cloudify_workflow_context)
            if ctx is None:
                ctx = {}
            if not _is_cloudify_workflow_context(ctx):
                ctx = CloudifyWorkflowContext(ctx)
            kwargs['ctx'] = ctx
            rest = get_rest_client()
            parent_conn, child_conn = Pipe()
            try:
                if rest.executions.get(ctx.execution_id).status in \
                        (Execution.CANCELLING, Execution.FORCE_CANCELLING):
                    # execution has been requested to be cancelled before it
                    # was even started
                    update_execution_cancelled(ctx)
                    return api.EXECUTION_CANCELLED_RESULT

                update_execution_status(ctx.execution_id, Execution.STARTED)
                send_workflow_event(ctx,
                                    event_type='workflow_started',
                                    message="Starting '{}' workflow execution"
                                            .format(ctx.workflow_id))

                # the actual execution of the workflow will run in another
                # process - this wrapper is the entry point for that
                # process, and takes care of forwarding the result or error
                # back to the parent process
                def child_wrapper():
                    try:
                        start_event_monitor(ctx)
                        current_workflow_ctx.set(ctx, kwargs)
                        result = func(*args, **kwargs)
                        if not ctx.internal.graph_mode:
                            tasks = list(
                                ctx.internal.task_graph.tasks_iter())
                            for task in tasks:
                                task.async_result.get()
                        child_conn.send({'result': result})
                    except api.ExecutionCancelled:
                        child_conn.send({
                            'result': api.EXECUTION_CANCELLED_RESULT})
                    except BaseException as e:
                        tb = StringIO()
                        traceback.print_exc(file=tb)
                        err = {
                            'type': type(e).__name__,
                            'message': str(e),
                            'traceback': tb.getvalue()
                        }
                        child_conn.send({'error': err})
                    finally:
                        current_workflow_ctx.clear()
                        child_conn.close()

                api.ctx = ctx
                api.pipe = child_conn

                # starting workflow execution on child process
                p = Process(target=child_wrapper)
                p.start()

                # while the child process is executing the workflow,
                # the parent process is polling for 'cancel' requests while
                # also waiting for messages from the child process
                has_sent_cancelling_action = False
                while True:
                    # check if child process sent a message
                    if parent_conn.poll(5):
                        data = parent_conn.recv()
                        if 'result' in data:
                            # child process has terminated
                            result = data['result']
                            break
                        else:
                            # error occurred in child process
                            error = data['error']
                            raise ProcessExecutionError(error['message'],
                                                        error['type'],
                                                        error['traceback'])
                    # check for 'cancel' requests
                    execution = rest.executions.get(ctx.execution_id)
                    if execution.status == Execution.FORCE_CANCELLING:
                        # terminate the child process immediately
                        p.terminate()
                        result = api.EXECUTION_CANCELLED_RESULT
                        break
                    elif not has_sent_cancelling_action and \
                            execution.status == Execution.CANCELLING:
                        # send a 'cancel' message to the child process. It
                        # is up to the workflow implementation to check for
                        # this message and act accordingly (by stopping and
                        # returning a api.EXECUTION_CANCELLED_RESULT result).
                        # parent process then goes back to polling for
                        # messages from child process or possibly
                        # 'force-cancelling' requests
                        parent_conn.send({'action': 'cancel'})
                        has_sent_cancelling_action = True

                # updating execution status and sending events according to
                # how the execution ended
                if result == api.EXECUTION_CANCELLED_RESULT:
                    update_execution_cancelled(ctx)
                else:
                    update_execution_status(ctx.execution_id,
                                            Execution.TERMINATED)
                    send_workflow_event(
                        ctx,
                        event_type='workflow_succeeded',
                        message="'{}' workflow execution succeeded"
                                .format(ctx.workflow_id))
                return result
            except BaseException as e:
                if isinstance(e, ProcessExecutionError):
                    error_traceback = e.traceback
                else:
                    error = StringIO()
                    traceback.print_exc(file=error)
                    error_traceback = error.getvalue()
                update_execution_status(ctx.execution_id,
                                        Execution.FAILED,
                                        error_traceback)
                send_workflow_event(
                    ctx,
                    event_type='workflow_failed',
                    message="'{}' workflow execution failed: {}"
                            .format(ctx.workflow_id, str(e)),
                    args={'error': error_traceback})
                raise

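# In this older process-based variant the cancel request arrives over the
# multiprocessing Pipe that the wrapper stores in api.pipe (child_conn),
# and the comment above notes the workflow may simply return
# api.EXECUTION_CANCELLED_RESULT. A minimal sketch of a workflow that polls
# the pipe itself, assuming api.pipe is set as in the wrapper above; the
# workflow name and _cancel_requested helper are illustrative only:
from cloudify.decorators import workflow
from cloudify.workflows import api


def _cancel_requested():
    # non-blocking check for a {'action': 'cancel'} message from the parent
    if api.pipe is not None and api.pipe.poll():
        return api.pipe.recv().get('action') == 'cancel'
    return False


@workflow
def batched_install(ctx, **kwargs):
    for node in ctx.nodes:
        if _cancel_requested():
            # returning this sentinel makes the parent process mark the
            # execution as cancelled instead of terminated
            return api.EXECUTION_CANCELLED_RESULT
        for instance in node.instances:
            instance.execute_operation('cloudify.interfaces.lifecycle.start')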