Example #1
def load_sleepy_workspace(instance):
    return workspace_from_load_target(
        PythonFileTarget(
            file_relative_path(__file__, 'sleepy.py'), 'sleepy_pipeline', working_directory=None
        ),
        instance,
    )
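
For orientation, here is a minimal usage sketch for the loader above. The call is illustrative and not part of the original example; `DagsterInstance.ephemeral()` is taken from Example #5 below.

from dagster import DagsterInstance

# Illustrative only: build an ephemeral instance and hand it to the loader
# defined above; the returned workspace points at sleepy_pipeline.
instance = DagsterInstance.ephemeral()
workspace = load_sleepy_workspace(instance)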
Example #2
File: utils.py Project: zuik/dagster
def define_out_of_process_workspace(python_file, fn_name):
    return Workspace(
        PythonFileTarget(
            python_file=python_file,
            attribute=fn_name,
            working_directory=None,
            location_name=main_repo_location_name(),
        ))
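
A hedged usage sketch for the helper above. The file name "repo.py" and the attribute "my_repo" are placeholders, not from the source; `file_relative_path` is assumed to be importable from the top-level dagster package, as in the other examples.

from dagster import file_relative_path

# Placeholder target: swap in the module that defines your repository and
# the name of the function (or repository object) inside it.
workspace = define_out_of_process_workspace(
    file_relative_path(__file__, "repo.py"), "my_repo"
)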
Example #3

def _mgr_fn(recon_repo):
    """Goes out of process but same process as host process"""
    check.inst_param(recon_repo, "recon_repo", ReconstructableRepository)

    with Workspace(
            PythonFileTarget(
                python_file=file_relative_path(__file__, "setup.py"),
                attribute="test_dict_repo",
                working_directory=None,
                location_name="test",
            )) as workspace:
        yield workspace
Example #4

def _mgr_fn(recon_repo):
    """Goes out of process via grpc"""
    check.inst_param(recon_repo, "recon_repo", ReconstructableRepository)

    loadable_target_origin = recon_repo.get_python_origin().loadable_target_origin
    with Workspace((PythonFileTarget(
            python_file=loadable_target_origin.python_file,
            attribute=loadable_target_origin.attribute,
            working_directory=loadable_target_origin.working_directory,
            location_name="test",
    ) if loadable_target_origin.python_file else ModuleTarget(
            module_name=loadable_target_origin.module_name,
            attribute=loadable_target_origin.attribute,
            location_name="test",
    ))) as workspace:
        yield workspace
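
The inline conditional above chooses a file-based target when the origin carries a python_file, and a module-based target otherwise. A sketch of the same dispatch pulled out into a helper; the name `_target_from_origin` is mine, not from the source.

def _target_from_origin(loadable_target_origin, location_name="test"):
    # Mirrors the conditional in Example #4: prefer a file target when the
    # origin names a python_file, otherwise fall back to a module target.
    if loadable_target_origin.python_file:
        return PythonFileTarget(
            python_file=loadable_target_origin.python_file,
            attribute=loadable_target_origin.attribute,
            working_directory=loadable_target_origin.working_directory,
            location_name=location_name,
        )
    return ModuleTarget(
        module_name=loadable_target_origin.module_name,
        attribute=loadable_target_origin.attribute,
        location_name=location_name,
    )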
Example #5
def test_all_step_events():  # pylint: disable=too-many-locals
    workspace = workspace_from_load_target(
        PythonFileTarget(__file__, define_test_events_pipeline.__name__))
    pipeline_def = define_test_events_pipeline()
    mode = pipeline_def.get_default_mode_name()
    instance = DagsterInstance.ephemeral()
    execution_plan = create_execution_plan(pipeline_def, mode=mode)
    pipeline_run = instance.create_run_for_pipeline(
        pipeline_def=pipeline_def, execution_plan=execution_plan, mode=mode)
    step_levels = execution_plan.topological_step_levels()

    unhandled_events = STEP_EVENTS.copy()

    # Exclude types that are not step events
    ignored_events = {
        'LogMessageEvent',
        'PipelineStartEvent',
        'PipelineSuccessEvent',
        'PipelineInitFailureEvent',
        'PipelineFailureEvent',
    }

    event_counts = defaultdict(int)

    for step_level in step_levels:
        for step in step_level:

            variables = {
                'executionParams': {
                    'selector': {
                        'repositoryLocationName': 'test_events',
                        'repositoryName': '<<unnamed>>',
                        'pipelineName': pipeline_def.name,
                    },
                    'runConfigData': {
                        'storage': {
                            'filesystem': {}
                        }
                    },
                    'mode': mode,
                    'executionMetadata': {
                        'runId': pipeline_run.run_id
                    },
                    'stepKeys': [step.key],
                }
            }
            res = execute_query(
                workspace,
                EXECUTE_PLAN_MUTATION,
                variables,
                instance=instance,
            )

            # go through the same dict, decrement all the event records we've seen from the GraphQL
            # response
            if not res.get('errors'):
                assert 'data' in res, res
                assert 'executePlan' in res['data'], res
                assert 'stepEvents' in res['data']['executePlan'], res
                step_events = res['data']['executePlan']['stepEvents']

                events = [
                    dagster_event_from_dict(e, pipeline_def.name)
                    for e in step_events
                    if e['__typename'] not in ignored_events
                ]

                for event in events:
                    if event.step_key:
                        key = event.step_key + '.' + event.event_type_value
                    else:
                        key = event.event_type_value
                    event_counts[key] -= 1
                unhandled_events -= {
                    DagsterEventType(e.event_type_value)
                    for e in events
                }
            else:
                raise Exception(res['errors'])

    # build up a dict, incrementing all the event records we've produced in the run storage
    logs = instance.all_logs(pipeline_run.run_id)
    for log in logs:
        if not log.dagster_event or (DagsterEventType(
                log.dagster_event.event_type_value) not in STEP_EVENTS.union(
                    set([DagsterEventType.ENGINE_EVENT]))):
            continue
        if log.dagster_event.step_key:
            key = log.dagster_event.step_key + '.' + log.dagster_event.event_type_value
        else:
            key = log.dagster_event.event_type_value
        event_counts[key] += 1

    # Ensure we've processed all the events that were generated in the run storage
    assert sum(event_counts.values()) == 0

    # Ensure we've handled the universe of event types
    # Why are these retry events not handled? Because right now there is no way to configure retries
    # on executePlan -- this needs to change, and we should separate the ExecutionParams that get
    # sent to executePlan from those that get sent to startPipelineExecution and friends
    assert unhandled_events == {
        DagsterEventType.STEP_UP_FOR_RETRY, DagsterEventType.STEP_RESTARTED
    }
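
The bookkeeping above decrements a counter for every step event returned by the GraphQL query and increments it for every matching record in run storage; a total of zero is the condition the test asserts to conclude the two sources agree. A tiny self-contained sketch of that reconciliation pattern, with plain strings standing in for event keys (no Dagster APIs involved):

from collections import defaultdict

def sources_agree(graphql_event_keys, storage_event_keys):
    # Decrement for events seen in the GraphQL response, increment for
    # events found in run storage; a zero total is the same condition the
    # test above asserts.
    counts = defaultdict(int)
    for key in graphql_event_keys:
        counts[key] -= 1
    for key in storage_event_keys:
        counts[key] += 1
    return sum(counts.values()) == 0

assert sources_agree(["step_1.STEP_SUCCESS"], ["step_1.STEP_SUCCESS"])
assert not sources_agree([], ["step_1.STEP_FAILURE"])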
Example #6
def load_sleepy_workspace():
    return workspace_from_load_target(
        PythonFileTarget(file_relative_path(__file__, 'sleepy.py'),
                         'sleepy_pipeline'))