Example 1
def test_get_single_schedule_definition(graphql_context):
    context = graphql_context
    instance = context.instance

    instance.reconcile_scheduler_state(
        external_repository=context.get_repository_location(
            main_repo_location_name()
        ).get_repository(main_repo_name()),
    )

    schedule_selector = infer_schedule_selector(context, "partition_based_multi_mode_decorator")
    result = execute_dagster_graphql(
        context, GET_SCHEDULE_DEFINITION, variables={"scheduleSelector": schedule_selector}
    )

    assert result.data

    assert result.data["scheduleDefinitionOrError"]["__typename"] == "ScheduleDefinition"
    assert result.data["scheduleDefinitionOrError"]["partitionSet"]
    assert not result.data["scheduleDefinitionOrError"]["executionTimezone"]

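    # A schedule defined with an explicit execution timezone should report it (here US/Central).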
    schedule_selector = infer_schedule_selector(context, "timezone_schedule")
    result = execute_dagster_graphql(
        context, GET_SCHEDULE_DEFINITION, variables={"scheduleSelector": schedule_selector}
    )

    assert result.data
    assert result.data["scheduleDefinitionOrError"]["__typename"] == "ScheduleDefinition"
    assert result.data["scheduleDefinitionOrError"]["executionTimezone"] == "US/Central"
Example 2
def test_start_and_stop_schedule(graphql_context):

    external_repository = graphql_context.get_repository_location(
        main_repo_location_name()).get_repository(main_repo_name())
    graphql_context.instance.reconcile_scheduler_state(external_repository)

    schedule_selector = infer_schedule_selector(
        graphql_context, "no_config_pipeline_hourly_schedule")

    # Start a single schedule
    start_result = execute_dagster_graphql(
        graphql_context,
        START_SCHEDULES_QUERY,
        variables={"scheduleSelector": schedule_selector},
    )
    assert (start_result.data["startSchedule"]["scheduleState"]["status"] ==
            ScheduleStatus.RUNNING.value)

    schedule_origin_id = start_result.data["startSchedule"]["scheduleState"][
        "scheduleOriginId"]

    # Stop a single schedule
    stop_result = execute_dagster_graphql(
        graphql_context,
        STOP_SCHEDULES_QUERY,
        variables={"scheduleOriginId": schedule_origin_id},
    )
    assert (stop_result.data["stopRunningSchedule"]["scheduleState"]["status"]
            == ScheduleStatus.STOPPED.value)
Example 3
def test_next_tick_bad_schedule(graphql_context):
    schedule_selector = infer_schedule_selector(graphql_context,
                                                "run_config_error_schedule")

    # Start the schedule; future tick run requests are only available for running schedules
    start_result = execute_dagster_graphql(
        graphql_context,
        START_SCHEDULES_QUERY,
        variables={"scheduleSelector": schedule_selector},
    )
    assert (start_result.data["startSchedule"]["scheduleState"]["status"] ==
            InstigatorStatus.RUNNING.value)

    # get schedule next tick
    result = execute_dagster_graphql(
        graphql_context,
        GET_SCHEDULE_QUERY,
        variables={"scheduleSelector": schedule_selector})

    future_ticks = result.data["scheduleOrError"]["futureTicks"]

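    # The broken schedule should produce an evaluation error, and no run requests, for every future tick.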
    assert future_ticks
    assert len(future_ticks["results"]) == 3
    for tick in future_ticks["results"]:
        assert tick["evaluationResult"]
        assert not tick["evaluationResult"]["runRequests"]
        assert not tick["evaluationResult"]["skipReason"]
        assert tick["evaluationResult"]["error"]
Example 4
def test_next_tick_bad_schedule(graphql_context):
    external_repository = graphql_context.get_repository_location(
        main_repo_location_name()).get_repository(main_repo_name())
    graphql_context.instance.reconcile_scheduler_state(external_repository)

    schedule_selector = infer_schedule_selector(graphql_context,
                                                "run_config_error_schedule")

    # Start the schedule; future tick run requests are only available for running schedules
    start_result = execute_dagster_graphql(
        graphql_context,
        START_SCHEDULES_QUERY,
        variables={"scheduleSelector": schedule_selector},
    )
    assert start_result.data["startSchedule"]["scheduleState"][
        "status"] == JobStatus.RUNNING.value

    # get schedule next tick
    result = execute_dagster_graphql(
        graphql_context,
        GET_SCHEDULE_QUERY,
        variables={"scheduleSelector": schedule_selector})

    future_ticks = result.data["scheduleOrError"]["futureTicks"]

    assert future_ticks
    assert len(future_ticks["results"]) == 3
    for tick in future_ticks["results"]:
        assert tick["evaluationResult"]
        assert not tick["evaluationResult"]["runRequests"]
        assert not tick["evaluationResult"]["skipReason"]
        assert tick["evaluationResult"]["error"]
Example 5
def test_start_and_stop_schedule(graphql_context):
    schedule_selector = infer_schedule_selector(
        graphql_context, "no_config_pipeline_hourly_schedule"
    )

    # Start a single schedule
    start_result = execute_dagster_graphql(
        graphql_context,
        START_SCHEDULES_QUERY,
        variables={"scheduleSelector": schedule_selector},
    )
    assert (
        start_result.data["startSchedule"]["scheduleState"]["status"]
        == InstigatorStatus.RUNNING.value
    )

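    # The schedule state id is reused as the origin id when stopping the schedule.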
    schedule_origin_id = start_result.data["startSchedule"]["scheduleState"]["id"]

    # Stop a single schedule
    stop_result = execute_dagster_graphql(
        graphql_context,
        STOP_SCHEDULES_QUERY,
        variables={"scheduleOriginId": schedule_origin_id},
    )
    assert (
        stop_result.data["stopRunningSchedule"]["scheduleState"]["status"]
        == InstigatorStatus.STOPPED.value
    )
Example 6
def test_start_schedule_with_default_status(graphql_context):
    schedule_selector = infer_schedule_selector(graphql_context,
                                                "running_in_code_schedule")

    result = execute_dagster_graphql(
        graphql_context,
        GET_SCHEDULE_STATE_QUERY,
        variables={"scheduleSelector": schedule_selector},
    )

    schedule_origin_id = result.data["scheduleOrError"]["scheduleState"]["id"]
    schedule_selector_id = result.data["scheduleOrError"]["scheduleState"][
        "selectorId"]

    assert result.data["scheduleOrError"]["scheduleState"][
        "status"] == "RUNNING"

    # Start a single schedule
    start_result = execute_dagster_graphql(
        graphql_context,
        START_SCHEDULES_QUERY,
        variables={"scheduleSelector": schedule_selector},
    )

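    # The schedule is already running (its default status is set in code), so a second start should fail with a message.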
    assert (
        "You have attempted to start schedule running_in_code_schedule, but it is already running"
        in start_result.data["startSchedule"]["message"])

    # Stop a single schedule
    stop_result = execute_dagster_graphql(
        graphql_context,
        STOP_SCHEDULES_QUERY,
        variables={
            "scheduleOriginId": schedule_origin_id,
            "scheduleSelectorId": schedule_selector_id,
        },
    )
    assert (stop_result.data["stopRunningSchedule"]["scheduleState"]["status"]
            == InstigatorStatus.STOPPED.value)

    # Start a single schedule
    start_result = execute_dagster_graphql(
        graphql_context,
        START_SCHEDULES_QUERY,
        variables={"scheduleSelector": schedule_selector},
    )

    assert (start_result.data["startSchedule"]["scheduleState"]["status"] ==
            InstigatorStatus.RUNNING.value)
Example 7
def test_future_ticks_until(graphql_context):
    schedule_selector = infer_schedule_selector(graphql_context,
                                                "timezone_schedule")

    # Start the schedule; future tick run requests are only available for running schedules
    start_result = execute_dagster_graphql(
        graphql_context,
        START_SCHEDULES_QUERY,
        variables={"scheduleSelector": schedule_selector},
    )
    assert (start_result.data["startSchedule"]["scheduleState"]["status"] ==
            InstigatorStatus.RUNNING.value)

    future_ticks_start_time = create_pendulum_time(
        2019, 2, 27, tz="US/Central").timestamp()
    future_ticks_end_time = create_pendulum_time(2019, 3, 2,
                                                 tz="US/Central").timestamp()

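    # Query the future ticks that fall inside the explicit [ticksAfter, ticksUntil] window.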
    result = execute_dagster_graphql(
        graphql_context,
        GET_SCHEDULE_FUTURE_TICKS_UNTIL,
        variables={
            "scheduleSelector": schedule_selector,
            "ticksAfter": future_ticks_start_time,
            "ticksUntil": future_ticks_end_time,
        },
    )

    future_ticks = result.data["scheduleOrError"]["futureTicks"]

    assert future_ticks
    assert len(future_ticks["results"]) == 3

    timestamps = [
        future_tick["timestamp"] for future_tick in future_ticks["results"]
    ]

    assert timestamps == [
        create_pendulum_time(2019, 2, 27, tz="US/Central").timestamp(),
        create_pendulum_time(2019, 2, 28, tz="US/Central").timestamp(),
        create_pendulum_time(2019, 3, 1, tz="US/Central").timestamp(),
    ]
Example 8
def test_get_single_schedule_definition(graphql_context):
    context = graphql_context
    instance = context.instance

    instance.reconcile_scheduler_state(
        external_repository=context.get_repository_location(
            main_repo_location_name()
        ).get_repository(main_repo_name()),
    )

    schedule_selector = infer_schedule_selector(context, 'partition_based_multi_mode_decorator')
    result = execute_dagster_graphql(
        context, GET_SCHEDULE_DEFINITION, variables={'scheduleSelector': schedule_selector}
    )

    assert result.data
    assert result.data['scheduleDefinitionOrError']['__typename'] == 'ScheduleDefinition'
    assert result.data['scheduleDefinitionOrError']['partitionSet']
Example 9
    def test_start_schedule_failure(self, graphql_context):
        assert graphql_context.read_only

        schedule_selector = infer_schedule_selector(
            graphql_context, "no_config_pipeline_hourly_schedule")

        # Start a single schedule
        result = execute_dagster_graphql(
            graphql_context,
            START_SCHEDULES_QUERY,
            variables={"scheduleSelector": schedule_selector},
        )

        assert not result.errors
        assert result.data

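        # In a read-only context the start mutation should be rejected as unauthorized.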
        assert result.data["startSchedule"][
            "__typename"] == "UnauthorizedError"
Example 10
def test_next_tick(graphql_context):
    external_repository = graphql_context.get_repository_location(
        main_repo_location_name()).get_repository(main_repo_name())
    graphql_context.instance.reconcile_scheduler_state(external_repository)

    schedule_selector = infer_schedule_selector(
        graphql_context, "no_config_pipeline_hourly_schedule")

    # Start the schedule; future tick run requests are only available for running schedules
    start_result = execute_dagster_graphql(
        graphql_context,
        START_SCHEDULES_QUERY,
        variables={"scheduleSelector": schedule_selector},
    )
    assert start_result.data["startSchedule"]["scheduleState"][
        "status"] == JobStatus.RUNNING.value

    # get schedule next tick
    result = execute_dagster_graphql(
        graphql_context,
        GET_SCHEDULE_QUERY,
        variables={"scheduleSelector": schedule_selector})

    future_ticks = result.data["scheduleOrError"]["futureTicks"]

    assert future_ticks
    assert len(future_ticks["results"]) == 3
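    # Each future tick should carry exactly one run request with the expected run config YAML.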
    for tick in future_ticks["results"]:
        assert tick["evaluationResult"]
        assert tick["evaluationResult"]["runRequests"]
        assert len(tick["evaluationResult"]["runRequests"]) == 1
        assert tick["evaluationResult"]["runRequests"][0]["runConfigYaml"] == yaml.dump(
            {"intermediate_storage": {"filesystem": {}}},
            default_flow_style=False,
            allow_unicode=True,
        )
Example 11
def test_start_without_initial_reconcile(graphql_context):

    repo_selector = infer_repository_selector(graphql_context)

    external_repository = graphql_context.get_repository_location(
        main_repo_location_name()).get_repository(main_repo_name())

    schedule_selector = infer_schedule_selector(
        graphql_context, "no_config_pipeline_hourly_schedule")

    # Start a single schedule
    start_result = execute_dagster_graphql(
        graphql_context,
        START_SCHEDULES_QUERY,
        variables={"scheduleSelector": schedule_selector},
    )
    assert start_result.data["startSchedule"]["scheduleState"][
        "status"] == JobStatus.RUNNING.value

    result = execute_dagster_graphql(
        graphql_context,
        GET_SCHEDULE_STATES_QUERY,
        variables={"repositorySelector": repo_selector},
    )

    assert result.data
    assert result.data["scheduleStatesOrError"]
    assert result.data["scheduleStatesOrError"][
        "__typename"] == "ScheduleStates"

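    # Only the schedule that was explicitly started should be RUNNING; every other schedule stays STOPPED.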
    results = result.data["scheduleStatesOrError"]["results"]
    assert len(results) == len(external_repository.get_external_schedules())

    for schedule_state in results:
        expected_status = (
            JobStatus.RUNNING.value
            if schedule_state["scheduleName"] == "no_config_pipeline_hourly_schedule"
            else JobStatus.STOPPED.value
        )
        assert schedule_state["status"] == expected_status
Example 12
def test_get_single_schedule_definition(graphql_context):
    context = graphql_context

    schedule_selector = infer_schedule_selector(
        context, "partition_based_multi_mode_decorator")

    # fetch schedule before reconcile
    result = execute_dagster_graphql(
        context,
        GET_SCHEDULE_QUERY,
        variables={"scheduleSelector": schedule_selector})
    assert result.data
    assert result.data["scheduleOrError"]["__typename"] == "Schedule"
    assert result.data["scheduleOrError"]["scheduleState"]

    result = execute_dagster_graphql(
        context,
        GET_SCHEDULE_QUERY,
        variables={"scheduleSelector": schedule_selector})

    assert result.data

    assert result.data["scheduleOrError"]["__typename"] == "Schedule"
    assert result.data["scheduleOrError"]["partitionSet"]
    assert result.data["scheduleOrError"]["executionTimezone"] == "UTC"

    future_ticks = result.data["scheduleOrError"]["futureTicks"]
    assert future_ticks
    assert len(future_ticks["results"]) == 3

    schedule_selector = infer_schedule_selector(context, "timezone_schedule")

    future_ticks_start_time = create_pendulum_time(
        2019, 2, 27, tz="US/Central").timestamp()

    result = execute_dagster_graphql(
        context,
        GET_SCHEDULE_QUERY,
        variables={
            "scheduleSelector": schedule_selector,
            "ticksAfter": future_ticks_start_time
        },
    )

    assert result.data
    assert result.data["scheduleOrError"]["__typename"] == "Schedule"
    assert result.data["scheduleOrError"]["executionTimezone"] == "US/Central"

    future_ticks = result.data["scheduleOrError"]["futureTicks"]
    assert future_ticks
    assert len(future_ticks["results"]) == 3
    timestamps = [
        future_tick["timestamp"] for future_tick in future_ticks["results"]
    ]

    assert timestamps == [
        create_pendulum_time(2019, 2, 27, tz="US/Central").timestamp(),
        create_pendulum_time(2019, 2, 28, tz="US/Central").timestamp(),
        create_pendulum_time(2019, 3, 1, tz="US/Central").timestamp(),
    ]

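    # The cursor sits just past the last returned tick and can be passed back as ticksAfter to page forward.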
    cursor = future_ticks["cursor"]

    assert future_ticks["cursor"] == (
        create_pendulum_time(2019, 3, 1, tz="US/Central").timestamp() + 1)

    result = execute_dagster_graphql(
        context,
        GET_SCHEDULE_QUERY,
        variables={
            "scheduleSelector": schedule_selector,
            "ticksAfter": cursor
        },
    )

    future_ticks = result.data["scheduleOrError"]["futureTicks"]

    assert future_ticks
    assert len(future_ticks["results"]) == 3
    timestamps = [
        future_tick["timestamp"] for future_tick in future_ticks["results"]
    ]

    assert timestamps == [
        create_pendulum_time(2019, 3, 2, tz="US/Central").timestamp(),
        create_pendulum_time(2019, 3, 3, tz="US/Central").timestamp(),
        create_pendulum_time(2019, 3, 4, tz="US/Central").timestamp(),
    ]
Example 13
def test_get_single_schedule_definition(graphql_context):
    context = graphql_context
    instance = context.instance

    instance.reconcile_scheduler_state(
        external_repository=context.get_repository_location(
            main_repo_location_name()
        ).get_repository(main_repo_name()),
    )

    schedule_selector = infer_schedule_selector(
        context, "partition_based_multi_mode_decorator")
    result = execute_dagster_graphql(
        context,
        GET_SCHEDULE_DEFINITION,
        variables={"scheduleSelector": schedule_selector})

    assert result.data

    assert result.data["scheduleDefinitionOrError"][
        "__typename"] == "ScheduleDefinition"
    assert result.data["scheduleDefinitionOrError"]["partitionSet"]
    assert not result.data["scheduleDefinitionOrError"]["executionTimezone"]

    future_ticks = result.data["scheduleDefinitionOrError"]["futureTicks"]
    assert future_ticks
    assert len(future_ticks["results"]) == 3

    schedule_selector = infer_schedule_selector(context, "timezone_schedule")

    future_ticks_start_time = pendulum.create(2019, 2, 27,
                                              tz="US/Central").timestamp()

    result = execute_dagster_graphql(
        context,
        GET_SCHEDULE_DEFINITION,
        variables={
            "scheduleSelector": schedule_selector,
            "ticksAfter": future_ticks_start_time
        },
    )

    assert result.data
    assert result.data["scheduleDefinitionOrError"][
        "__typename"] == "ScheduleDefinition"
    assert result.data["scheduleDefinitionOrError"][
        "executionTimezone"] == "US/Central"

    future_ticks = result.data["scheduleDefinitionOrError"]["futureTicks"]
    assert future_ticks
    assert len(future_ticks["results"]) == 3
    timestamps = [
        future_tick["timestamp"] for future_tick in future_ticks["results"]
    ]

    assert timestamps == [
        pendulum.create(2019, 2, 27, tz="US/Central").timestamp(),
        pendulum.create(2019, 2, 28, tz="US/Central").timestamp(),
        pendulum.create(2019, 3, 1, tz="US/Central").timestamp(),
    ]

    cursor = future_ticks["cursor"]

    assert future_ticks["cursor"] == (
        pendulum.create(2019, 3, 1, tz="US/Central").timestamp() + 1)

    result = execute_dagster_graphql(
        context,
        GET_SCHEDULE_DEFINITION,
        variables={
            "scheduleSelector": schedule_selector,
            "ticksAfter": cursor
        },
    )

    future_ticks = result.data["scheduleDefinitionOrError"]["futureTicks"]

    assert future_ticks
    assert len(future_ticks["results"]) == 3
    timestamps = [
        future_tick["timestamp"] for future_tick in future_ticks["results"]
    ]

    assert timestamps == [
        pendulum.create(2019, 3, 2, tz="US/Central").timestamp(),
        pendulum.create(2019, 3, 3, tz="US/Central").timestamp(),
        pendulum.create(2019, 3, 4, tz="US/Central").timestamp(),
    ]