Example #1
File: cli.py Project: zuodh/dagster
def execute_query(workspace,
                  query,
                  variables=None,
                  use_sync_executor=False,
                  instance=None):
    check.inst_param(workspace, 'workspace', Workspace)
    check.str_param(query, 'query')
    check.opt_dict_param(variables, 'variables')
    instance = (check.inst_param(instance, 'instance', DagsterInstance)
                if instance else DagsterInstance.get())
    check.bool_param(use_sync_executor, 'use_sync_executor')

    query = query.strip('\'" \n\t')

    locations = [
        RepositoryLocation.from_handle(x)
        for x in workspace.repository_location_handles
    ]

    context = DagsterGraphQLContext(
        locations=locations,
        instance=instance,
        version=__version__,
    )

    executor = SyncExecutor() if use_sync_executor else GeventExecutor()

    result = graphql(
        request_string=query,
        schema=create_schema(),
        context_value=context,
        variable_values=variables,
        executor=executor,
    )

    result_dict = result.to_dict()

    context.drain_outstanding_executions()

    # Here we detect whether this is in fact an error response. If so, we
    # iterate over result_dict alongside the original result, whose errors
    # are GraphQLError instances. If a GraphQLError carries an original_error
    # property (typically the exception thrown by the resolver), we serialize
    # that exception's stack trace into the 'stack_trace' property of the
    # corresponding error dict to ease debugging.

    if 'errors' in result_dict:
        check.invariant(len(result_dict['errors']) == len(result.errors))
        for python_error, error_dict in zip(result.errors,
                                            result_dict['errors']):
            if hasattr(python_error,
                       'original_error') and python_error.original_error:
                error_dict['stack_trace'] = get_stack_trace_array(
                    python_error.original_error)

    return result_dict
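
A minimal invocation sketch (hedged): workspace must be a Workspace the caller has already constructed, and 'query { __typename }' is a schema-agnostic smoke-test query.

result_dict = execute_query(workspace, 'query { __typename }',
                            use_sync_executor=True)
assert 'errors' not in result_dict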
Example #2
def test_execute_hammer_through_dagit():
    with instance_for_test() as instance:
        with get_workspace_process_context_from_kwargs(
            instance,
            version="",
            read_only=False,
            kwargs={
                "python_file": file_relative_path(
                    __file__, "../../../dagster-test/dagster_test/toys/hammer.py"
                ),
                "attribute": "hammer_pipeline",
            },
        ) as workspace_process_context:
            context = workspace_process_context.create_request_context()
            selector = infer_pipeline_selector(context, "hammer_pipeline")
            executor = SyncExecutor()

            variables = {
                "executionParams": {
                    "runConfigData": {
                        "execution": {"dask": {"config": {"cluster": {"local": {}}}}},
                    },
                    "selector": selector,
                    "mode": "default",
                }
            }

            start_pipeline_result = graphql(
                request_string=LAUNCH_PIPELINE_EXECUTION_MUTATION,
                schema=create_schema(),
                context=context,
                variables=variables,
                executor=executor,
            )

            if start_pipeline_result.errors:
                raise Exception("{}".format(start_pipeline_result.errors))

            run_id = start_pipeline_result.data["launchPipelineExecution"]["run"]["runId"]

            context.instance.run_launcher.join(timeout=60)

            subscription = execute_dagster_graphql(
                context, SUBSCRIPTION_QUERY, variables={"runId": run_id}
            )

            subscribe_results = []
            subscription.subscribe(subscribe_results.append)

            messages = [
                x["__typename"] for x in subscribe_results[0].data["pipelineRunLogs"]["messages"]
            ]

            assert "RunStartEvent" in messages
            assert "RunSuccessEvent" in messages
Example #3
def execute_query(handle,
                  query,
                  variables=None,
                  use_sync_executor=False,
                  instance=None):
    check.inst_param(handle, 'handle', ExecutionTargetHandle)
    check.str_param(query, 'query')
    check.opt_dict_param(variables, 'variables')
    # We allow external creation of the pipeline_run_storage to support testing contexts where we
    # need access to the underlying run storage
    instance = check.opt_inst_param(instance, 'instance', DagsterInstance,
                                    DagsterInstance.get())
    check.bool_param(use_sync_executor, 'use_sync_executor')

    query = query.strip('\'" \n\t')

    execution_manager = SynchronousExecutionManager()

    context = DagsterGraphQLContext(handle=handle,
                                    instance=instance,
                                    execution_manager=execution_manager,
                                    version=__version__)

    executor = SyncExecutor() if use_sync_executor else GeventExecutor()

    result = graphql(
        request_string=query,
        schema=create_schema(),
        context=context,
        variables=variables,
        executor=executor,
    )

    result_dict = result.to_dict()

    # Here we detect whether this is in fact an error response. If so, we
    # iterate over result_dict alongside the original result, whose errors
    # are GraphQLError instances. If a GraphQLError carries an original_error
    # property (typically the exception thrown by the resolver), we serialize
    # that exception's stack trace into the 'stack_trace' property of the
    # corresponding error dict to ease debugging.

    if 'errors' in result_dict:
        check.invariant(len(result_dict['errors']) == len(result.errors))
        for python_error, error_dict in zip(result.errors,
                                            result_dict['errors']):
            if hasattr(python_error,
                       'original_error') and python_error.original_error:
                error_dict['stack_trace'] = get_stack_trace_array(
                    python_error.original_error)

    return result_dict
Example #4
File: cli.py Project: bbbbbgit/dagster
def execute_query(recon_repo,
                  query,
                  variables=None,
                  use_sync_executor=False,
                  instance=None):
    check.inst_param(recon_repo, 'recon_repo', ReconstructableRepository)
    check.str_param(query, 'query')
    check.opt_dict_param(variables, 'variables')
    instance = check.opt_inst_param(instance, 'instance', DagsterInstance,
                                    DagsterInstance.get())
    check.bool_param(use_sync_executor, 'use_sync_executor')

    query = query.strip('\'" \n\t')

    execution_manager = SynchronousExecutionManager()

    context = DagsterGraphQLInProcessRepositoryContext(
        recon_repo=recon_repo,
        instance=instance,
        execution_manager=execution_manager,
        version=__version__,
    )

    executor = SyncExecutor() if use_sync_executor else GeventExecutor()

    result = graphql(
        request_string=query,
        schema=create_schema(),
        context_value=context,
        variable_values=variables,
        executor=executor,
    )

    result_dict = result.to_dict()

    # Here we detect whether this is in fact an error response. If so, we
    # iterate over result_dict alongside the original result, whose errors
    # are GraphQLError instances. If a GraphQLError carries an original_error
    # property (typically the exception thrown by the resolver), we serialize
    # that exception's stack trace into the 'stack_trace' property of the
    # corresponding error dict to ease debugging.

    if 'errors' in result_dict:
        check.invariant(len(result_dict['errors']) == len(result.errors))
        for python_error, error_dict in zip(result.errors,
                                            result_dict['errors']):
            if hasattr(python_error,
                       'original_error') and python_error.original_error:
                error_dict['stack_trace'] = get_stack_trace_array(
                    python_error.original_error)

    return result_dict
Example #5
def test_execute_hammer_through_dagit():
    recon_repo = ReconstructableRepository.for_file(
        file_relative_path(__file__, '../../../../examples/dagster_examples/toys/hammer.py'),
        'hammer_pipeline',
    )
    instance = DagsterInstance.local_temp()

    context = DagsterGraphQLContext(
        locations=[InProcessRepositoryLocation(recon_repo)], instance=instance,
    )

    selector = get_legacy_pipeline_selector(context, 'hammer_pipeline')

    executor = SyncExecutor()

    variables = {
        'executionParams': {
            'runConfigData': {
                'storage': {'filesystem': {}},
                'execution': {'dask': {'config': {'cluster': {'local': {}}}}},
            },
            'selector': selector,
            'mode': 'default',
        }
    }

    start_pipeline_result = graphql(
        request_string=START_PIPELINE_EXECUTION_MUTATION,
        schema=create_schema(),
        context=context,
        variables=variables,
        executor=executor,
    )

    if start_pipeline_result.errors:
        raise Exception('{}'.format(start_pipeline_result.errors))

    run_id = start_pipeline_result.data['startPipelineExecution']['run']['runId']

    context.drain_outstanding_executions()

    subscription = execute_dagster_graphql(context, SUBSCRIPTION_QUERY, variables={'runId': run_id})

    subscribe_results = []
    subscription.subscribe(subscribe_results.append)

    messages = [x['__typename'] for x in subscribe_results[0].data['pipelineRunLogs']['messages']]

    assert 'PipelineStartEvent' in messages
    assert 'PipelineSuccessEvent' in messages
Example #6
File: cli.py Project: G9999/dagster
def execute_query(workspace,
                  query,
                  variables=None,
                  use_sync_executor=False,
                  instance=None):
    check.inst_param(workspace, "workspace", Workspace)
    check.str_param(query, "query")
    check.opt_dict_param(variables, "variables")
    instance = (check.inst_param(instance, "instance", DagsterInstance)
                if instance else DagsterInstance.get())
    check.bool_param(use_sync_executor, "use_sync_executor")

    query = query.strip("'\" \n\t")

    context = DagsterGraphQLContext(
        workspace=workspace,
        instance=instance,
        version=__version__,
    )

    executor = SyncExecutor() if use_sync_executor else GeventExecutor()

    result = graphql(
        request_string=query,
        schema=create_schema(),
        context_value=context,
        variable_values=variables,
        executor=executor,
    )

    result_dict = result.to_dict()

    # Here we detect whether this is in fact an error response. If so, we
    # iterate over result_dict alongside the original result, whose errors
    # are GraphQLError instances. If a GraphQLError carries an original_error
    # property (typically the exception thrown by the resolver), we serialize
    # that exception's stack trace into the 'stack_trace' property of the
    # corresponding error dict to ease debugging.

    if "errors" in result_dict:
        check.invariant(len(result_dict["errors"]) == len(result.errors))
        for python_error, error_dict in zip(result.errors,
                                            result_dict["errors"]):
            if hasattr(python_error,
                       "original_error") and python_error.original_error:
                error_dict["stack_trace"] = get_stack_trace_array(
                    python_error.original_error)

    return result_dict
Example #7
def test_execute_hammer_through_dagit():
    handle = ExecutionTargetHandle.for_pipeline_python_file(
        file_relative_path(__file__, '../../../../examples/dagster_examples/toys/hammer.py'),
        'hammer_pipeline',
    )
    instance = DagsterInstance.local_temp()

    execution_manager = SubprocessExecutionManager(instance)

    context = DagsterGraphQLContext(
        handle=handle, execution_manager=execution_manager, instance=instance
    )

    executor = SyncExecutor()

    variables = {
        'executionParams': {
            'environmentConfigData': {'storage': {'filesystem': {}}, 'execution': {'dask': {}}},
            'selector': {'name': handle.build_pipeline_definition().name},
            'mode': 'default',
        }
    }

    start_pipeline_result = graphql(
        request_string=START_PIPELINE_EXECUTION_MUTATION,
        schema=create_schema(),
        context=context,
        variables=variables,
        executor=executor,
    )

    run_id = start_pipeline_result.data['startPipelineExecution']['run']['runId']

    context.execution_manager.join()

    subscription = execute_dagster_graphql(context, SUBSCRIPTION_QUERY, variables={'runId': run_id})

    subscribe_results = []
    subscription.subscribe(subscribe_results.append)

    messages = [x['__typename'] for x in subscribe_results[0].data['pipelineRunLogs']['messages']]

    assert 'PipelineStartEvent' in messages
    assert 'PipelineSuccessEvent' in messages
Example #8
def test_query():
    from graphql.execution.executors.sync import SyncExecutor

    schema = graphene.Schema(query=Query)
    result = schema.execute(
        """
      query {
        listFoos {
          id
          name
        }
    }
    """,
        executor=SyncExecutor(),
        return_promise=False,
    )

    assert result.errors is None
    assert result.data is not None
    assert [x["name"] for x in result.data["listFoos"]] == ["foo", "bar"]
Example #9
def generate_schema(db, models):
    query_classes = {}
    mutation_classes = {}
    registry = Registry()
    for model in models:
        node_class = get_node(db, model, registry)
        connection_class = get_connection(node_class)
        node_name = node_class.__name__
        entity_name = inflection.underscore(node_name)
        entities_name = get_many_field_name(entity_name)
        query_classes.update({
            entity_name:
            PeeweeNodeField(node_class),
            entities_name:
            PeeweeConnectionField(connection_class),
        })
        for mutation_class in (
                CreateOneMutation,
                CreateManyMutation,
                UpdateOneMutation,
                UpdateManyMutation,
                DeleteOneMutation,
                DeleteManyMutation,
                CloneOneMutation,
        ):
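            # e.g. CreateOneMutation -> 'Create<Node>' and CreateManyMutation
            # -> 'Create<Nodes>' (plural via get_many_field_name); the result
            # is snake_cased by inflection.underscore below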
            mutation_name = re.sub(
                r'(.*)(One|Many)Mutation', lambda m: m.group(1) +
                (node_name
                 if m.group(2) == 'One' else get_many_field_name(node_name)),
                mutation_class.__name__)
            mutation_classes[inflection.underscore(
                mutation_name)] = mutation_class.generate(
                    node_class, connection_class).Field()
    query_class = type('Query', (ObjectType, ), query_classes)
    mutation_class = type('Mutation', (ObjectType, ), mutation_classes)
    executor = SyncExecutor()
    schema = Schema(query=query_class,
                    mutation=mutation_class,
                    auto_camelcase=False)
    return schema, executor
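
For context, a hedged sketch of how the returned pair might be used; the model and field names below are illustrative (actual field names come from inflection.underscore and get_many_field_name, and auto_camelcase=False keeps them snake_case):

# Illustrative only: the 'User'/'Post' models and 'users' field are assumptions.
schema, executor = generate_schema(db, [User, Post])
result = schema.execute('query { users { id } }', executor=executor)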
Example #10
def test_query_with_match():
    from graphql.execution.executors.sync import SyncExecutor

    global foo_bars
    foo_bars = [FooBar(name="test", count=1)]

    schema = graphene.Schema(query=Query)
    result = schema.execute(
        """
        query {
            listFooBars(match: {name: "test", count: 1}) {
            name
            count
            }
        }
        """,
        executor=SyncExecutor(),
        return_promise=False,
    )

    assert result.errors is None
    assert result.data is not None
    assert pydantic.parse_obj_as(List[FooBar],
                                 result.data["listFooBars"]) == foo_bars
Example #11
import pytest
from promise import Promise
from promise.dataloader import DataLoader

from graphql import GraphQLObjectType, GraphQLField, GraphQLID, GraphQLArgument, GraphQLNonNull, GraphQLSchema, parse, execute
from graphql.execution.executors.sync import SyncExecutor
from graphql.execution.executors.thread import ThreadExecutor


@pytest.mark.parametrize(
    "executor",
    [
        SyncExecutor(),
        # ThreadExecutor(),
    ])
def test_batches_correctly(executor):

    Business = GraphQLObjectType(
        'Business', lambda: {
            'id':
            GraphQLField(GraphQLID, resolver=lambda root, info, **args: root),
        })

    Query = GraphQLObjectType(
        'Query', lambda: {
            'getBusiness':
            GraphQLField(Business,
                         args={
                             'id': GraphQLArgument(GraphQLNonNull(GraphQLID)),
                         },
                         resolver=lambda root, info, **args: info.context.
                         business_data_loader.load(args.get('id'))),
        })
    # NOTE: the original snippet is truncated at the resolver above; the
    # completion here is an assumed reconstruction (a promise-based DataLoader
    # held on the execution context), which is what a batching test exercises.
Example #12
def get_graphql_params(self, *args, **kwargs):
    params = super(GeventSubscriptionServer,
                   self).get_graphql_params(*args, **kwargs)
    return dict(params, executor=SyncExecutor())
Example #13
def test_execute_hammer_through_dagit():
    recon_repo = ReconstructableRepository.for_file(
        file_relative_path(
            __file__, "../../../dagster-test/dagster_test/toys/hammer.py"),
        "hammer_pipeline",
    )
    instance = DagsterInstance.local_temp()

    context = DagsterGraphQLContext(
        workspace=Workspace([
            RepositoryLocationHandle.create_in_process_location(
                recon_repo.pointer)
        ]),
        instance=instance,
    )

    selector = infer_pipeline_selector(context, "hammer_pipeline")

    executor = SyncExecutor()

    variables = {
        "executionParams": {
            "runConfigData": {
                "storage": {
                    "filesystem": {}
                },
                "execution": {
                    "dask": {
                        "config": {
                            "cluster": {
                                "local": {}
                            }
                        }
                    }
                },
            },
            "selector": selector,
            "mode": "default",
        }
    }

    start_pipeline_result = graphql(
        request_string=LAUNCH_PIPELINE_EXECUTION_MUTATION,
        schema=create_schema(),
        context=context,
        variables=variables,
        executor=executor,
    )

    if start_pipeline_result.errors:
        raise Exception("{}".format(start_pipeline_result.errors))

    run_id = start_pipeline_result.data["launchPipelineExecution"]["run"][
        "runId"]

    context.drain_outstanding_executions()

    subscription = execute_dagster_graphql(context,
                                           SUBSCRIPTION_QUERY,
                                           variables={"runId": run_id})

    subscribe_results = []
    subscription.subscribe(subscribe_results.append)

    messages = [
        x["__typename"]
        for x in subscribe_results[0].data["pipelineRunLogs"]["messages"]
    ]

    assert "PipelineStartEvent" in messages
    assert "PipelineSuccessEvent" in messages
Example #14
def run_http_query(
    schema,  # type: GraphQLSchema
    request_method,  # type: str
    data,  # type: Union[Dict, List[Dict]]
    query_data=None,  # type: Optional[Dict]
    batch_enabled=False,  # type: bool
    catch=False,  # type: bool
    **execute_options  # type: Any
):
    """Execute GraphQL coming from an HTTP query against a given schema.

    You need to pass the schema (that is supposed to be already validated),
    the request_method (that must be either "get" or "post"),
    the data from the HTTP request body, and the data from the query string.
    By default, only one parameter set is expected, but if you set batch_enabled,
    you can pass data that contains a list of parameter sets to run multiple
    queries as a batch execution using a single HTTP request. You can specify
    whether results that raise an HttpQueryError should be caught and skipped.
    All other keyword arguments are passed on to the GraphQL-Core function for
    executing GraphQL queries.

    Returns a ServerResults tuple with the list of ExecutionResults as first item
    and the list of parameters that have been used for execution as second item.
    """
    if not isinstance(schema, GraphQLSchema):
        raise TypeError("Expected a GraphQL schema, but received {!r}.".format(schema))
    if request_method not in ("get", "post"):
        raise HttpQueryError(
            405,
            "GraphQL only supports GET and POST requests.",
            headers={"Allow": "GET, POST"},
        )
    if catch:
        catch_exc = (
            HttpQueryError
        )  # type: Union[Type[HttpQueryError], Type[_NoException]]
    else:
        catch_exc = _NoException
    is_batch = isinstance(data, list)

    is_get_request = request_method == "get"
    allow_only_query = is_get_request

    if not is_batch:
        if not isinstance(data, (dict, MutableMapping)):
            raise HttpQueryError(
                400, "GraphQL params should be a dict. Received {!r}.".format(data)
            )
        data = [data]
    elif not batch_enabled:
        raise HttpQueryError(400, "Batch GraphQL requests are not enabled.")

    if not data:
        raise HttpQueryError(400, "Received an empty list in the batch request.")

    extra_data = {}  # type: Dict[str, Any]
    # If this is a batch request, we don't consume the data from the query string
    if not is_batch:
        extra_data = query_data or {}

    all_params = [get_graphql_params(entry, extra_data) for entry in data]

    executor = execute_options.get("executor")
    response_executor = executor if executor else SyncExecutor()

    response_promises = [
        response_executor.execute(
            get_response, schema, params, catch_exc, allow_only_query, **execute_options
        )
        for params in all_params
    ]
    response_executor.wait_until_finished()

    results = [
        result.get() if is_thenable(result) else result for result in response_promises
    ]

    return ServerResults(results, all_params)
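
A minimal usage sketch, assuming a trivial graphql-core 2.x schema; per the docstring, run_http_query returns a ServerResults tuple of results plus the parameter sets used:

from graphql import (GraphQLField, GraphQLObjectType, GraphQLSchema,
                     GraphQLString)

# Toy schema with a single root field; the names here are illustrative.
schema = GraphQLSchema(query=GraphQLObjectType(
    'Query',
    {'hello': GraphQLField(GraphQLString,
                           resolver=lambda root, info: 'world')},
))

results, params = run_http_query(schema, 'post', {'query': '{ hello }'})
assert results[0].data == {'hello': 'world'}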
Example #15
        """
      query {
        listDepartments {
          id,
          name,
          employees {
            ...on Employee {
              id
              name
              hiredOn
              salary { rating }
            }
            ...on Manager {
              name
              salary {
                rating
                amount
              }
              teamSize
            }
          }
        }
      }
""",
        executor=SyncExecutor(),
        return_promise=False,
    )

    print(result.errors)
    print(json.dumps(result.data, indent=2))
Example #16
import json

import pytest

# Assumed imports for this excerpt (provided by the original test module):
from graphql.execution.executors.asyncio import AsyncioExecutor
from graphql.execution.executors.sync import SyncExecutor
from sanic_graphql import GraphQLView

from .app import create_app, url_string, parametrize_sync_async_app_test
from .schema import Schema, AsyncSchema


def response_json(response):
    return json.loads(response.body.decode())


j = lambda **kwargs: json.dumps(kwargs)
jl = lambda **kwargs: json.dumps([kwargs])


@pytest.mark.parametrize('view,expected', [
    (GraphQLView(schema=Schema), False),
    (GraphQLView(schema=Schema, executor=SyncExecutor()), False),
    (GraphQLView(schema=Schema, executor=AsyncioExecutor()), True),
])
def test_eval(view, expected):
    assert view._enable_async == expected


@pytest.mark.parametrize('app', [
    (create_app(async_executor=False)),
    (create_app(async_executor=True)),
])
def test_allows_get_with_query_param(app):
    _, response = app.client.get(uri=url_string(query='{test}'))

    assert response.status == 200
    assert response_json(response) == {'data': {'test': "Hello World"}}