def exception_to_error_cause(exception, tb):
    """Serialize an exception and its traceback into an error-cause dict.

    :param exception: the exception instance
    :param tb: the traceback object associated with the exception
    :return: dict with ``message``, ``traceback`` and ``type`` keys
    """
    buffer = StringIO()
    exception_type = type(exception)
    traceback.print_exception(exception_type, exception, tb, file=buffer)
    return {
        'message': u'{0}'.format(exception),
        'traceback': buffer.getvalue(),
        'type': exception_type.__name__
    }
def internal_error(e):
    """Build the 500 JSON response for an unexpected manager REST error.

    :param e: the exception that escaped the request handler
    :return: (flask response, 500) tuple with message, error code and the
        current traceback
    """
    trace_buffer = StringIO()
    traceback.print_exc(file=trace_buffer)
    message = "Internal error occurred in manager REST server - {0}: {1}".format(
        type(e).__name__, e)
    response = jsonify(
        message=message,
        error_code=INTERNAL_SERVER_ERROR_CODE,
        server_traceback=trace_buffer.getvalue())
    return response, 500
def serialize_known_exception(e):
    """
    Serialize a cloudify exception into a dict

    :param e: A cloudify exception
    :return: A JSON serializable payload dict
    """
    # Capture the currently-handled traceback; callers invoke this from
    # inside an except block.
    tb = StringIO()
    traceback.print_exc(file=tb)
    trace_out = tb.getvalue()

    # Needed because HttpException constructor sucks
    append_message = False
    # Convert exception to a known exception type that can be deserialized
    # by the calling process.
    # NOTE(review): the isinstance order below matters if some of these
    # exception types subclass others (e.g. OperationRetry vs
    # RecoverableError) - confirm against the exceptions module before
    # reordering.
    known_exception_type_args = []
    if isinstance(e, exceptions.HttpException):
        known_exception_type = exceptions.HttpException
        known_exception_type_args = [e.url, e.code]
        append_message = True
    elif isinstance(e, exceptions.NonRecoverableError):
        known_exception_type = exceptions.NonRecoverableError
    elif isinstance(e, exceptions.OperationRetry):
        known_exception_type = exceptions.OperationRetry
        known_exception_type_args = [e.retry_after]
        # Retries are expected control flow - no traceback in the payload.
        trace_out = None
    elif isinstance(e, exceptions.RecoverableError):
        known_exception_type = exceptions.RecoverableError
        known_exception_type_args = [e.retry_after]
    elif isinstance(e, exceptions.StopAgent):
        known_exception_type = exceptions.StopAgent
    elif isinstance(e, exceptions.WorkflowFailed):
        known_exception_type = exceptions.WorkflowFailed
        trace_out = None
    else:
        # convert pure user exceptions to a RecoverableError
        known_exception_type = exceptions.RecoverableError

    # Only cloudify exceptions carry a `causes` attribute.
    try:
        causes = e.causes
    except AttributeError:
        causes = []

    payload = {
        'exception_type': type(e).__name__,
        'message': format_exception(e),
        'known_exception_type': known_exception_type.__name__,
        'known_exception_type_args': known_exception_type_args,
        'known_exception_type_kwargs': {
            'causes': causes or []
        },
        'append_message': append_message,
    }
    # The traceback key is omitted entirely (not set to None) when it was
    # deliberately dropped above.
    if trace_out:
        payload['traceback'] = trace_out
    return payload
def _handle_local_workflow(self): try: self._workflow_started() result = self._execute_workflow_function() self._workflow_succeeded() return result except Exception as e: error = StringIO() traceback.print_exc(file=error) self._workflow_failed(e, error.getvalue()) raise
def test_operation_retry_task_message(self):
    """Verify that retry events are rendered into the task output."""
    captured = StringIO()
    passthrough = logs.stdout_event_out

    # Provide same interface for all event output
    def capture_event(log, ctx=None):
        # Preserve the original output behavior while recording the text.
        passthrough(log)
        captured.write('{0}\n'.format(log['message']['text']))

    with patch('cloudify.logs.stdout_event_out', capture_event):
        self.test_operation_retry()
    output_text = captured.getvalue()
    self.assertIn('Task rescheduled', output_text)
    self.assertIn('Operation will be retried', output_text)
def _create_maintenance_error(error_code):
    """Build a 503 response telling the client the manager is in maintenance.

    :param error_code: machine-readable error code to embed in the body
    :return: flask response with status 503
    """
    # app.logger.exception(e) # gets logged automatically
    trace_buffer = StringIO()
    traceback.print_exc(file=trace_buffer)
    body = {
        "message": 'Your request was rejected since Cloudify '
                   'manager is currently in maintenance mode',
        "error_code": error_code,
        "server_traceback": trace_buffer.getvalue()
    }
    response = jsonify(body)
    response.status_code = 503
    return response
def _handle_local_workflow(self): try: self._workflow_started() result = self._execute_workflow_function() if 'error' in result: wrapped_exc = result['error'].wrapped_exc raise wrapped_exc self._workflow_succeeded() return result['result'] except Exception as e: error = StringIO() traceback.print_exc(file=error) self._workflow_failed(e, error.getvalue()) raise
def _remote_workflow_child_thread(self, queue):
    """Entry point for the thread that actually runs the workflow.

    The workflow function executes here, and its result - or a
    serialized error - is forwarded back to the parent thread through
    *queue*.
    """
    with state.current_workflow_ctx.push(self.ctx, self.kwargs):
        try:
            queue.put({'result': self._execute_workflow_function()})
        except api.ExecutionCancelled:
            # Cancellation is an expected outcome, not an error.
            queue.put({'result': api.EXECUTION_CANCELLED_RESULT})
        except BaseException as workflow_ex:
            buffer = StringIO()
            traceback.print_exc(file=buffer)
            queue.put({'error': {
                'type': type(workflow_ex).__name__,
                'message': str(workflow_ex),
                'traceback': buffer.getvalue()
            }})
def process(self, request):
    """Handle one serialized ctx request and return a JSON reply string.

    :param request: JSON string with an ``args`` list for the ctx handler
    :return: JSON string with ``type`` (result/stop_operation/error) and
        ``payload`` keys
    """
    try:
        args = json.loads(request)['args']
        payload = process_ctx_request(self.ctx, args)
        if isinstance(payload, ScriptException):
            reply = {'type': 'stop_operation',
                     'payload': dict(message=str(payload))}
        else:
            reply = {'type': 'result', 'payload': payload}
        # Serialization stays inside the try so a non-serializable payload
        # is also reported as an error reply.
        result = json.dumps(reply)
    except Exception as e:
        buffer = StringIO()
        traceback.print_exc(file=buffer)
        result = json.dumps({'type': 'error', 'payload': {
            'type': type(e).__name__,
            'message': str(e),
            'traceback': buffer.getvalue()
        }})
    return result
def text(self):
    """Render the event's message text.

    Log messages are prefixed with their level; task failure/reschedule
    events have any recorded error-cause tracebacks appended.
    """
    message = self._event['message']['text']
    if self.is_log_message:
        return u'{0}: {1}'.format(self.log_level, message)
    if self.event_type in ('task_rescheduled', 'task_failed'):
        causes = self._event['context'].get('task_error_causes', [])
        if causes:
            has_many = len(causes) > 1
            parts = []
            if has_many:
                parts.append('Causes (most recent cause last):\n')
            for cause in causes:
                if has_many:
                    # Separator between successive cause tracebacks.
                    parts.append('{0}\n'.format('-' * 32))
                parts.append(cause.get('traceback', ''))
            message = u'{0}\n{1}'.format(message, ''.join(parts))
    return message
def _load_private_key(self, key_contents):
    """Load the private key and return a paramiko PKey subclass.

    :param key_contents: the contents of a keyfile, as a string starting
        with "---BEGIN"
    :return: A paramiko PKey subclass - RSA, ECDSA or Ed25519
    :raises AgentInstallerConfigurationError: if none of the key types
        can parse the contents
    """
    for key_cls in (RSAKey, ECDSAKey, Ed25519Key):
        try:
            return key_cls.from_private_key(StringIO(key_contents))
        except SSHException:
            # Not this key type - try the next one.
            continue
    raise exceptions.AgentInstallerConfigurationError(
        'Could not load the private key as an '
        'RSA, ECDSA, or Ed25519 key')
def _merge_and_validate_execution_parameters(
        workflow, workflow_name,
        execution_parameters=None, allow_custom_parameters=False):
    """Merge user-supplied execution parameters with workflow declarations.

    :param workflow: workflow descriptor dict (may declare ``parameters``)
    :param workflow_name: workflow name, used in error messages
    :param execution_parameters: parameters supplied for this execution
    :param allow_custom_parameters: whether parameters not declared by the
        workflow are accepted
    :return: dict of merged parameter values
    :raises ValueError: on missing mandatory parameters, wrongly-typed
        values, or undeclared parameters when not allowed
    """
    merged = {}
    declared = workflow.get('parameters', {})
    execution_parameters = execution_parameters or {}
    missing_mandatory = set()
    wrong_types = {}
    allowed_types = {
        'integer': int,
        'float': float,
        'string': (text_type, bytes),
        'boolean': bool
    }
    for name, param in declared.items():
        if 'type' in param and name in execution_parameters:
            value = execution_parameters[name]
            expected = param['type']
            # Values may arrive as strings (e.g. from a CLI); coerce them
            # to the declared non-string type where possible.
            if (isinstance(value, (text_type, bytes))
                    and expected in allowed_types
                    and expected != 'string'):
                value = _try_convert_from_str(value, allowed_types[expected])
                execution_parameters[name] = value
            # Unknown declared types fall back to `object` (always passes).
            if not isinstance(value, allowed_types.get(expected, object)):
                wrong_types[name] = expected
        if 'default' in param:
            merged[name] = execution_parameters.get(name, param['default'])
        elif name in execution_parameters:
            merged[name] = execution_parameters[name]
        else:
            # No default and not supplied - mandatory parameter is missing.
            missing_mandatory.add(name)

    if missing_mandatory:
        raise ValueError('Workflow "{0}" must be provided with the following '
                         'parameters to execute: {1}'.format(
                             workflow_name, ','.join(missing_mandatory)))
    if wrong_types:
        raise ValueError(''.join(
            'Parameter "{0}" must be of type {1}\n'.format(name, type_name)
            for name, type_name in wrong_types.items()))

    custom_parameters = {k: v for k, v in execution_parameters.items()
                         if k not in declared}
    if not allow_custom_parameters and custom_parameters:
        raise ValueError(
            'Workflow "{0}" does not have the following parameters '
            'declared: {1}. Remove these parameters or use '
            'the flag for allowing custom parameters'.format(
                workflow_name, ','.join(custom_parameters)))
    merged.update(custom_parameters)
    return merged
def run(args, expected):
    """Run the client with *args* and assert its stdout equals *expected*.

    :param args: argument list passed to ``client.main``
    :param expected: the exact stdout output expected
    """
    output = StringIO()
    # Fix: the previous version reassigned sys.stdout and never restored
    # it, leaking the redirection to later tests (and permanently if
    # client.main raised). Restore it in a finally block.
    original_stdout = sys.stdout
    sys.stdout = output
    try:
        client.main(args)
    finally:
        sys.stdout = original_stdout
    self.assertEqual(output.getvalue(), expected)