def format_error_with_stack_trace(error: Exception) -> Dict[str, Any]:
    formatted_error = {"message": str(error)}  # type: Dict[str, Any]

    if isinstance(error, GraphQLError):
        if error.locations is not None:
            formatted_error["locations"] = [
                {"line": loc.line, "column": loc.column} for loc in error.locations
            ]
        if error.path is not None:
            formatted_error["path"] = error.path

    # this is what is different about this implementation
    # we print out stack traces to ease debugging
    if hasattr(error, "original_error") and error.original_error:
        formatted_error["stack_trace"] = get_stack_trace_array(error.original_error)
    else:
        formatted_error["stack_trace"] = get_stack_trace_array(error)

    if hasattr(error, "__cause__") and error.__cause__:
        formatted_error["cause"] = format_error_with_stack_trace(error.__cause__)

    return formatted_error
def format_error_with_stack_trace(error):
    # type: (Exception) -> Dict[str, Any]
    formatted_error = {'message': text_type(error)}  # type: Dict[str, Any]

    if isinstance(error, GraphQLError):
        if error.locations is not None:
            formatted_error['locations'] = [
                {'line': loc.line, 'column': loc.column} for loc in error.locations
            ]
        if error.path is not None:
            formatted_error['path'] = error.path

    # this is what is different about this implementation
    # we print out stack traces to ease debugging
    if hasattr(error, 'original_error') and error.original_error:
        formatted_error['stack_trace'] = get_stack_trace_array(error.original_error)
    else:
        formatted_error['stack_trace'] = get_stack_trace_array(error)

    if hasattr(error, '__cause__') and error.__cause__:
        formatted_error['cause'] = format_error_with_stack_trace(error.__cause__)

    return formatted_error
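Both versions delegate traceback rendering to get_stack_trace_array, which is not shown here. A minimal sketch of such a helper, assuming it simply formats the exception's traceback into a list of strings suitable for a JSON-serializable error payload (the actual Dagster helper may differ), could look like:

import traceback
from typing import List


def get_stack_trace_array(exception):
    # type: (Exception) -> List[str]
    # Hypothetical sketch for illustration only: render the exception's
    # traceback as a list of lines. Returns an empty list when the exception
    # was never raised and therefore carries no traceback.
    if exception.__traceback__ is None:
        return []
    return traceback.format_exception(type(exception), exception, exception.__traceback__)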
def execute_query(
    handle,
    query,
    variables=None,
    pipeline_run_storage=None,
    scheduler=None,
    raise_on_error=False,
    use_sync_executor=False,
):
    check.inst_param(handle, 'handle', ExecutionTargetHandle)
    check.str_param(query, 'query')
    check.opt_dict_param(variables, 'variables')
    # We allow external creation of the pipeline_run_storage to support testing contexts where we
    # need access to the underlying run storage
    check.opt_inst_param(pipeline_run_storage, 'pipeline_run_storage', RunStorage)
    check.opt_inst_param(scheduler, 'scheduler', Scheduler)
    check.bool_param(raise_on_error, 'raise_on_error')
    check.bool_param(use_sync_executor, 'use_sync_executor')

    query = query.strip('\'" \n\t')

    execution_manager = SynchronousExecutionManager()

    pipeline_run_storage = pipeline_run_storage or InMemoryRunStorage()

    context = DagsterGraphQLContext(
        handle=handle,
        pipeline_runs=pipeline_run_storage,
        scheduler=scheduler,
        execution_manager=execution_manager,
        raise_on_error=raise_on_error,
        version=__version__,
    )

    executor = SyncExecutor() if use_sync_executor else GeventExecutor()

    result = graphql(
        request_string=query,
        schema=create_schema(),
        context=context,
        variables=variables,
        executor=executor,
    )

    result_dict = result.to_dict()

    # Here we detect if this is in fact an error response
    # If so, we iterate over the result_dict and the original result
    # which contains a GraphQLError. If that GraphQL error contains
    # an original_error property (which is the exception the resolver
    # has thrown, typically) we serialize the stack trace of that exception
    # in the 'stack_trace' property of each error to ease debugging
    if 'errors' in result_dict:
        check.invariant(len(result_dict['errors']) == len(result.errors))
        for python_error, error_dict in zip(result.errors, result_dict['errors']):
            if hasattr(python_error, 'original_error') and python_error.original_error:
                error_dict['stack_trace'] = get_stack_trace_array(python_error.original_error)

    return result_dict
def execute_query(recon_repo, query, variables=None, use_sync_executor=False, instance=None):
    check.inst_param(recon_repo, 'recon_repo', ReconstructableRepository)
    check.str_param(query, 'query')
    check.opt_dict_param(variables, 'variables')
    instance = (
        check.inst_param(instance, 'instance', DagsterInstance)
        if instance
        else DagsterInstance.get()
    )
    check.bool_param(use_sync_executor, 'use_sync_executor')

    query = query.strip('\'" \n\t')

    execution_manager = SubprocessExecutionManager(instance)

    context = DagsterGraphQLContext(
        environments=[
            InProcessDagsterEnvironment(recon_repo, execution_manager=execution_manager)
        ],
        instance=instance,
        version=__version__,
    )

    executor = SyncExecutor() if use_sync_executor else GeventExecutor()

    result = graphql(
        request_string=query,
        schema=create_schema(),
        context_value=context,
        variable_values=variables,
        executor=executor,
    )

    result_dict = result.to_dict()

    execution_manager.join()

    # Here we detect if this is in fact an error response
    # If so, we iterate over the result_dict and the original result
    # which contains a GraphQLError. If that GraphQL error contains
    # an original_error property (which is the exception the resolver
    # has thrown, typically) we serialize the stack trace of that exception
    # in the 'stack_trace' property of each error to ease debugging
    if 'errors' in result_dict:
        check.invariant(len(result_dict['errors']) == len(result.errors))
        for python_error, error_dict in zip(result.errors, result_dict['errors']):
            if hasattr(python_error, 'original_error') and python_error.original_error:
                error_dict['stack_trace'] = get_stack_trace_array(python_error.original_error)

    return result_dict
def execute_query(workspace, query, variables=None, use_sync_executor=False, instance=None):
    check.inst_param(workspace, 'workspace', Workspace)
    check.str_param(query, 'query')
    check.opt_dict_param(variables, 'variables')
    instance = (
        check.inst_param(instance, 'instance', DagsterInstance)
        if instance
        else DagsterInstance.get()
    )
    check.bool_param(use_sync_executor, 'use_sync_executor')

    query = query.strip('\'" \n\t')

    locations = [
        RepositoryLocation.from_handle(x) for x in workspace.repository_location_handles
    ]

    context = DagsterGraphQLContext(
        locations=locations,
        instance=instance,
        version=__version__,
    )

    executor = SyncExecutor() if use_sync_executor else GeventExecutor()

    result = graphql(
        request_string=query,
        schema=create_schema(),
        context_value=context,
        variable_values=variables,
        executor=executor,
    )

    result_dict = result.to_dict()

    context.drain_outstanding_executions()

    # Here we detect if this is in fact an error response
    # If so, we iterate over the result_dict and the original result
    # which contains a GraphQLError. If that GraphQL error contains
    # an original_error property (which is the exception the resolver
    # has thrown, typically) we serialize the stack trace of that exception
    # in the 'stack_trace' property of each error to ease debugging
    if 'errors' in result_dict:
        check.invariant(len(result_dict['errors']) == len(result.errors))
        for python_error, error_dict in zip(result.errors, result_dict['errors']):
            if hasattr(python_error, 'original_error') and python_error.original_error:
                error_dict['stack_trace'] = get_stack_trace_array(python_error.original_error)

    return result_dict
def execute_query(workspace, query, variables=None, use_sync_executor=False, instance=None):
    check.inst_param(workspace, "workspace", Workspace)
    check.str_param(query, "query")
    check.opt_dict_param(variables, "variables")
    instance = (
        check.inst_param(instance, "instance", DagsterInstance)
        if instance
        else DagsterInstance.get()
    )
    check.bool_param(use_sync_executor, "use_sync_executor")

    query = query.strip("'\" \n\t")

    context = DagsterGraphQLContext(
        workspace=workspace,
        instance=instance,
        version=__version__,
    )

    executor = SyncExecutor() if use_sync_executor else GeventExecutor()

    result = graphql(
        request_string=query,
        schema=create_schema(),
        context_value=context,
        variable_values=variables,
        executor=executor,
    )

    result_dict = result.to_dict()

    # Here we detect if this is in fact an error response
    # If so, we iterate over the result_dict and the original result
    # which contains a GraphQLError. If that GraphQL error contains
    # an original_error property (which is the exception the resolver
    # has thrown, typically) we serialize the stack trace of that exception
    # in the 'stack_trace' property of each error to ease debugging
    if "errors" in result_dict:
        check.invariant(len(result_dict["errors"]) == len(result.errors))
        for python_error, error_dict in zip(result.errors, result_dict["errors"]):
            if hasattr(python_error, "original_error") and python_error.original_error:
                error_dict["stack_trace"] = get_stack_trace_array(python_error.original_error)

    return result_dict
def execute_query(workspace_process_context, query, variables=None):
    check.inst_param(
        workspace_process_context, "workspace_process_context", WorkspaceProcessContext
    )
    check.str_param(query, "query")
    check.opt_dict_param(variables, "variables")

    query = query.strip("'\" \n\t")

    context = workspace_process_context.create_request_context()

    result = graphql(
        request_string=query,
        schema=create_schema(),
        context_value=context,
        variable_values=variables,
    )

    result_dict = result.to_dict()

    # Here we detect if this is in fact an error response
    # If so, we iterate over the result_dict and the original result
    # which contains a GraphQLError. If that GraphQL error contains
    # an original_error property (which is the exception the resolver
    # has thrown, typically) we serialize the stack trace of that exception
    # in the 'stack_trace' property of each error to ease debugging
    if "errors" in result_dict:
        check.invariant(len(result_dict["errors"]) == len(result.errors))
        for python_error, error_dict in zip(result.errors, result_dict["errors"]):
            if hasattr(python_error, "original_error") and python_error.original_error:
                error_dict["stack_trace"] = get_stack_trace_array(python_error.original_error)

    return result_dict
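Every variant of execute_query returns the same shape: a plain GraphQL result dictionary in which each entry under "errors" may carry an extra "stack_trace" list when a resolver raised. As a usage sketch (a hypothetical caller, not part of the module above), the enriched payload can be inspected like any other GraphQL response:

def print_errors_with_stack_traces(result_dict):
    # Hypothetical helper for illustration: walk the error entries produced by
    # execute_query and print each message followed by any captured stack trace.
    for error in result_dict.get("errors", []):
        print(error.get("message", "<no message>"))
        for line in error.get("stack_trace", []):
            print("    " + line.rstrip())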