def test_can_reload_on_external_repository_error():
    with instance_for_test() as instance:
        with ExitStack() as exit_stack:
            with mock.patch(
                # Note that it is where the function is *used* that needs to be
                # mocked, not where it is defined.
                # See https://docs.python.org/3/library/unittest.mock.html#where-to-patch
                "dagster.core.host_representation.handle.sync_get_streaming_external_repositories_grpc"
            ) as external_repository_mock:
                external_repository_mock.side_effect = Exception("get_external_repo_failure")

                with pytest.warns(UserWarning, match=re.escape("get_external_repo_failure")):
                    workspace = exit_stack.enter_context(
                        define_out_of_process_workspace(__file__, "get_repo")
                    )

                assert not workspace.has_repository_location_handle(main_repo_location_name())
                assert workspace.has_repository_location_error(main_repo_location_name())

                process_context = WorkspaceProcessContext(workspace=workspace, instance=instance)
                assert len(process_context.repository_locations) == 0

            # Outside the patch, reloading the repository location succeeds again.
            workspace.reload_repository_location(main_repo_location_name())
            assert workspace.has_repository_location_handle(main_repo_location_name())

            process_context = WorkspaceProcessContext(workspace=workspace, instance=instance)
            assert len(process_context.repository_locations) == 1
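# A runnable sketch of the "where to patch" rule cited in the comment above,
# using only the standard library (`where_am_i` and `demo_where_to_patch` are
# illustrative names, not part of this test suite):
from os import getcwd  # this module now holds its *own* reference to os.getcwd
from unittest import mock


def where_am_i():
    return getcwd()


def demo_where_to_patch():
    # Patching the defining module does not reach where_am_i(), because this
    # module bound the name at import time.
    with mock.patch("os.getcwd", return_value="/patched"):
        assert where_am_i() != "/patched"

    # Patching the name where it is *used* (this module's binding) does work.
    with mock.patch(f"{__name__}.getcwd", return_value="/patched"):
        assert where_am_i() == "/patched"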
def test_get_schedule_definitions_for_repository(graphql_context):
    selector = infer_repository_selector(graphql_context)
    result = execute_dagster_graphql(
        graphql_context,
        GET_SCHEDULE_DEFINITIONS_QUERY,
        variables={"repositorySelector": selector},
    )

    assert result.data
    assert result.data["scheduleDefinitionsOrError"]
    assert result.data["scheduleDefinitionsOrError"]["__typename"] == "ScheduleDefinitions"

    external_repository = graphql_context.get_repository_location(
        main_repo_location_name()
    ).get_repository(main_repo_name())

    results = result.data["scheduleDefinitionsOrError"]["results"]
    assert len(results) == len(external_repository.get_external_schedules())

    for schedule in results:
        if (
            schedule["name"] == "run_config_error_schedule"
            or schedule["name"] == "tags_error_schedule"
        ):
            assert schedule["runConfigOrError"]["__typename"] == "PythonError"
        elif schedule["name"] == "invalid_config_schedule":
            assert (
                schedule["runConfigOrError"]["yaml"]
                == "solids:\n  takes_an_enum:\n    config: invalid\n"
            )
        else:
            assert schedule["runConfigOrError"]["yaml"] == "storage:\n  filesystem: {}\n"
def test_get_schedule_states_for_repository_after_reconcile_using_mutation(graphql_context):
    selector = infer_repository_selector(graphql_context)
    external_repository = graphql_context.get_repository_location(
        main_repo_location_name()
    ).get_repository(main_repo_name())

    result = execute_dagster_graphql(
        graphql_context,
        RECONCILE_SCHEDULER_STATE_QUERY,
        variables={"repositorySelector": selector},
    )

    assert result.data
    assert result.data["reconcileSchedulerState"]
    assert result.data["reconcileSchedulerState"]["message"] == "Success"

    result = execute_dagster_graphql(
        graphql_context,
        GET_SCHEDULE_STATES_QUERY,
        variables={"repositorySelector": selector},
    )

    assert result.data
    assert result.data["scheduleStatesOrError"]
    assert result.data["scheduleStatesOrError"]["__typename"] == "ScheduleStates"

    results = result.data["scheduleStatesOrError"]["results"]
    assert len(results) == len(external_repository.get_external_schedules())

    for schedule_state in results:
        assert schedule_state["status"] == ScheduleStatus.STOPPED.value
def test_start_and_stop_schedule(graphql_context):
    external_repository = graphql_context.get_repository_location(
        main_repo_location_name()
    ).get_repository(main_repo_name())
    graphql_context.instance.reconcile_scheduler_state(external_repository)

    schedule_selector = infer_schedule_selector(
        graphql_context, "no_config_pipeline_hourly_schedule"
    )

    # Start a single schedule
    start_result = execute_dagster_graphql(
        graphql_context,
        START_SCHEDULES_QUERY,
        variables={"scheduleSelector": schedule_selector},
    )
    assert (
        start_result.data["startSchedule"]["scheduleState"]["status"]
        == ScheduleStatus.RUNNING.value
    )

    schedule_origin_id = start_result.data["startSchedule"]["scheduleState"]["scheduleOriginId"]

    # Stop a single schedule
    stop_result = execute_dagster_graphql(
        graphql_context,
        STOP_SCHEDULES_QUERY,
        variables={"scheduleOriginId": schedule_origin_id},
    )
    assert (
        stop_result.data["stopRunningSchedule"]["scheduleState"]["status"]
        == ScheduleStatus.STOPPED.value
    )
def test_get_single_schedule_definition(graphql_context):
    context = graphql_context
    instance = context.instance

    instance.reconcile_scheduler_state(
        external_repository=context.get_repository_location(
            main_repo_location_name()
        ).get_repository(main_repo_name()),
    )

    schedule_selector = infer_schedule_selector(context, "partition_based_multi_mode_decorator")

    result = execute_dagster_graphql(
        context, GET_SCHEDULE_DEFINITION, variables={"scheduleSelector": schedule_selector}
    )

    assert result.data
    assert result.data["scheduleDefinitionOrError"]["__typename"] == "ScheduleDefinition"
    assert result.data["scheduleDefinitionOrError"]["partitionSet"]
    assert not result.data["scheduleDefinitionOrError"]["executionTimezone"]

    schedule_selector = infer_schedule_selector(context, "timezone_schedule")

    result = execute_dagster_graphql(
        context, GET_SCHEDULE_DEFINITION, variables={"scheduleSelector": schedule_selector}
    )

    assert result.data
    assert result.data["scheduleDefinitionOrError"]["__typename"] == "ScheduleDefinition"
    assert result.data["scheduleDefinitionOrError"]["executionTimezone"] == "US/Central"
def test_next_tick_bad_schedule(graphql_context):
    external_repository = graphql_context.get_repository_location(
        main_repo_location_name()
    ).get_repository(main_repo_name())
    graphql_context.instance.reconcile_scheduler_state(external_repository)

    schedule_selector = infer_schedule_selector(graphql_context, "run_config_error_schedule")

    # Start a single schedule; future tick run requests are only available for
    # running schedules.
    start_result = execute_dagster_graphql(
        graphql_context,
        START_SCHEDULES_QUERY,
        variables={"scheduleSelector": schedule_selector},
    )
    assert start_result.data["startSchedule"]["scheduleState"]["status"] == JobStatus.RUNNING.value

    # get schedule next tick
    result = execute_dagster_graphql(
        graphql_context, GET_SCHEDULE_QUERY, variables={"scheduleSelector": schedule_selector}
    )

    future_ticks = result.data["scheduleOrError"]["futureTicks"]

    assert future_ticks
    assert len(future_ticks["results"]) == 3
    for tick in future_ticks["results"]:
        # the schedule's run config function raises, so each future tick carries
        # an error instead of run requests
        assert tick["evaluationResult"]
        assert not tick["evaluationResult"]["runRequests"]
        assert not tick["evaluationResult"]["skipReason"]
        assert tick["evaluationResult"]["error"]
@contextmanager  # from contextlib; the function yields, so callers use `with`
def get_main_external_repo():
    with location_origin_from_python_file(
        python_file=file_relative_path(__file__, "setup.py"),
        attribute=main_repo_name(),
        working_directory=None,
        location_name=main_repo_location_name(),
    ).create_test_location() as location:
        yield location.get_repository(main_repo_name())
def get_main_external_repo(instance):
    return RepositoryLocation.from_handle(
        location_handle_from_python_file(
            python_file=file_relative_path(__file__, "setup.py"),
            attribute=main_repo_name(),
            working_directory=None,
            user_process_api=python_user_process_api_from_instance(instance),
            location_name=main_repo_location_name(),
        )
    ).get_repository(main_repo_name())
@contextmanager  # from contextlib; the function yields, so callers use `with`
def get_main_external_repo():
    with RepositoryLocationHandle.create_from_repository_location_origin(
        location_origin_from_python_file(
            python_file=file_relative_path(__file__, "setup.py"),
            attribute=main_repo_name(),
            working_directory=None,
            location_name=main_repo_location_name(),
        )
    ) as handle:
        yield RepositoryLocation.from_handle(handle).get_repository(main_repo_name())
@contextmanager  # from contextlib; the function yields, so callers use `with`
def get_main_workspace(instance):
    with WorkspaceProcessContext(
        instance,
        PythonFileTarget(
            python_file=file_relative_path(__file__, "setup.py"),
            attribute=main_repo_name(),
            working_directory=None,
            location_name=main_repo_location_name(),
        ),
    ) as workspace_process_context:
        yield workspace_process_context.create_request_context()
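# A minimal usage sketch for the context-manager helpers above (assumes the
# @contextmanager decorator shown there and dagster's instance_for_test test
# utility; the test body itself is illustrative, not from this suite):
def example_workspace_usage():
    with instance_for_test() as instance:
        with get_main_workspace(instance) as request_context:
            location = request_context.get_repository_location(main_repo_location_name())
            assert location.get_repository(main_repo_name())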
def test_sensor_next_ticks(graphql_context):
    external_repository = graphql_context.get_repository_location(
        main_repo_location_name()
    ).get_repository(main_repo_name())
    graphql_context.instance.reconcile_scheduler_state(external_repository)

    sensor_name = "always_no_config_sensor"
    external_sensor = external_repository.get_external_sensor(sensor_name)
    sensor_selector = infer_sensor_selector(graphql_context, sensor_name)

    result = execute_dagster_graphql(
        graphql_context, GET_SENSOR_QUERY, variables={"sensorSelector": sensor_selector}
    )

    # test default sensor off
    assert result.data
    assert result.data["sensorOrError"]["__typename"] == "Sensor"
    next_tick = result.data["sensorOrError"]["nextTick"]
    assert not next_tick

    # test default sensor with no tick
    graphql_context.instance.add_job_state(
        JobState(external_sensor.get_external_origin(), JobType.SENSOR, JobStatus.RUNNING)
    )
    result = execute_dagster_graphql(
        graphql_context, GET_SENSOR_QUERY, variables={"sensorSelector": sensor_selector}
    )
    assert result.data
    assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 0
    assert result.data["sensorOrError"]["__typename"] == "Sensor"
    next_tick = result.data["sensorOrError"]["nextTick"]
    assert not next_tick

    # test default sensor with last tick
    list(
        execute_sensor_iteration(
            graphql_context.instance, get_default_daemon_logger("SensorDaemon")
        )
    )
    result = execute_dagster_graphql(
        graphql_context, GET_SENSOR_QUERY, variables={"sensorSelector": sensor_selector}
    )
    assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 1
    assert result.data
    assert result.data["sensorOrError"]["__typename"] == "Sensor"
    next_tick = result.data["sensorOrError"]["nextTick"]
    assert next_tick
def test_sensor_next_ticks(graphql_context):
    external_repository = graphql_context.get_repository_location(
        main_repo_location_name()
    ).get_repository(main_repo_name())

    sensor_name = "always_no_config_sensor"
    external_sensor = external_repository.get_external_sensor(sensor_name)
    sensor_selector = infer_sensor_selector(graphql_context, sensor_name)

    result = execute_dagster_graphql(
        graphql_context, GET_SENSOR_QUERY, variables={"sensorSelector": sensor_selector}
    )

    # test default sensor off
    assert result.data
    assert result.data["sensorOrError"]["__typename"] == "Sensor"
    next_tick = result.data["sensorOrError"]["nextTick"]
    assert not next_tick

    # test default sensor with no tick
    graphql_context.instance.add_instigator_state(
        InstigatorState(
            external_sensor.get_external_origin(),
            InstigatorType.SENSOR,
            InstigatorStatus.RUNNING,
        )
    )
    result = execute_dagster_graphql(
        graphql_context, GET_SENSOR_QUERY, variables={"sensorSelector": sensor_selector}
    )
    assert result.data
    assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 0
    assert result.data["sensorOrError"]["__typename"] == "Sensor"
    next_tick = result.data["sensorOrError"]["nextTick"]
    assert not next_tick

    # test default sensor with last tick
    _create_tick(graphql_context)
    result = execute_dagster_graphql(
        graphql_context, GET_SENSOR_QUERY, variables={"sensorSelector": sensor_selector}
    )
    assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 1
    assert result.data
    assert result.data["sensorOrError"]["__typename"] == "Sensor"
    next_tick = result.data["sensorOrError"]["nextTick"]
    assert next_tick
def test_fetch_snapshot_or_error_by_active_pipeline_name_not_found(graphql_context, snapshot):
    result = execute_dagster_graphql(
        graphql_context,
        SNAPSHOT_OR_ERROR_QUERY_BY_PIPELINE_NAME,
        {
            "activePipelineSelector": {
                "pipelineName": "jkdjfkdj",
                "repositoryName": main_repo_name(),
                "repositoryLocationName": main_repo_location_name(),
            }
        },
    )

    assert not result.errors
    assert result.data
    assert result.data["pipelineSnapshotOrError"]["__typename"] == "PipelineNotFoundError"
    snapshot.assert_match(pretty_dump(result.data))
def test_get_schedule_states_for_repository_with_removed_schedule_definitions(graphql_context):
    selector = infer_repository_selector(graphql_context)
    external_repository = graphql_context.get_repository_location(
        main_repo_location_name()
    ).get_repository(main_repo_name())
    graphql_context.instance.reconcile_scheduler_state(external_repository)

    result = execute_dagster_graphql(
        graphql_context,
        GET_SCHEDULE_STATES_WITHOUT_DEFINITIONS_QUERY,
        variables={"repositorySelector": selector},
    )

    assert result.data["scheduleStatesOrError"]
    assert result.data["scheduleStatesOrError"]["__typename"] == "ScheduleStates"
    results = result.data["scheduleStatesOrError"]["results"]
    assert len(results) == 0
def test_get_schedule_definitions_for_repository(graphql_context):
    selector = infer_repository_selector(graphql_context)
    result = execute_dagster_graphql(
        graphql_context,
        GET_SCHEDULES_QUERY,
        variables={"repositorySelector": selector},
    )

    assert result.data
    assert result.data["schedulesOrError"]
    assert result.data["schedulesOrError"]["__typename"] == "Schedules"

    external_repository = graphql_context.get_repository_location(
        main_repo_location_name()
    ).get_repository(main_repo_name())

    results = result.data["schedulesOrError"]["results"]
    assert len(results) == len(external_repository.get_external_schedules())

    for schedule in results:
        if schedule["name"] == "timezone_schedule":
            assert schedule["executionTimezone"] == "US/Central"
def test_schedule_next_tick(graphql_context):
    external_repository = graphql_context.get_repository_location(
        main_repo_location_name()
    ).get_repository(main_repo_name())
    graphql_context.instance.reconcile_scheduler_state(external_repository)

    schedule_name = "no_config_pipeline_hourly_schedule"
    external_schedule = external_repository.get_external_schedule(schedule_name)
    job_selector = infer_job_selector(graphql_context, schedule_name)

    # need to be running in order to generate a future tick
    graphql_context.instance.start_schedule_and_update_storage_state(external_schedule)

    result = execute_dagster_graphql(
        graphql_context, GET_JOB_QUERY, variables={"jobSelector": job_selector}
    )

    assert result.data
    assert result.data["jobStateOrError"]["__typename"] == "JobState"
    next_tick = result.data["jobStateOrError"]["nextTick"]
    assert next_tick
def test_sensor_next_tick(graphql_context):
    external_repository = graphql_context.get_repository_location(
        main_repo_location_name()
    ).get_repository(main_repo_name())
    graphql_context.instance.reconcile_scheduler_state(external_repository)

    sensor_name = "always_no_config_sensor"
    external_sensor = external_repository.get_external_sensor(sensor_name)
    job_selector = infer_job_selector(graphql_context, sensor_name)

    # need to be running and create a sensor tick in the last 30 seconds in order
    # to generate a future tick
    graphql_context.instance.start_sensor(external_sensor)
    _create_sensor_tick(graphql_context.instance)

    result = execute_dagster_graphql(
        graphql_context, GET_JOB_QUERY, variables={"jobSelector": job_selector}
    )

    assert result.data
    assert result.data["jobStateOrError"]["__typename"] == "JobState"
    next_tick = result.data["jobStateOrError"]["nextTick"]
    assert next_tick
def test_next_tick(graphql_context):
    external_repository = graphql_context.get_repository_location(
        main_repo_location_name()
    ).get_repository(main_repo_name())
    graphql_context.instance.reconcile_scheduler_state(external_repository)

    schedule_selector = infer_schedule_selector(
        graphql_context, "no_config_pipeline_hourly_schedule"
    )

    # Start a single schedule; future tick run requests are only available for
    # running schedules.
    start_result = execute_dagster_graphql(
        graphql_context,
        START_SCHEDULES_QUERY,
        variables={"scheduleSelector": schedule_selector},
    )
    assert start_result.data["startSchedule"]["scheduleState"]["status"] == JobStatus.RUNNING.value

    # get schedule next tick
    result = execute_dagster_graphql(
        graphql_context, GET_SCHEDULE_QUERY, variables={"scheduleSelector": schedule_selector}
    )

    future_ticks = result.data["scheduleOrError"]["futureTicks"]

    assert future_ticks
    assert len(future_ticks["results"]) == 3
    for tick in future_ticks["results"]:
        assert tick["evaluationResult"]
        assert tick["evaluationResult"]["runRequests"]
        assert len(tick["evaluationResult"]["runRequests"]) == 1
        # the expected yaml renders as "intermediate_storage:\n  filesystem: {}\n"
        assert tick["evaluationResult"]["runRequests"][0]["runConfigYaml"] == yaml.dump(
            {"intermediate_storage": {"filesystem": {}}},
            default_flow_style=False,
            allow_unicode=True,
        )
def test_start_without_initial_reconcile(graphql_context):
    repo_selector = infer_repository_selector(graphql_context)

    external_repository = graphql_context.get_repository_location(
        main_repo_location_name()
    ).get_repository(main_repo_name())

    schedule_selector = infer_schedule_selector(
        graphql_context, "no_config_pipeline_hourly_schedule"
    )

    # Start a single schedule
    start_result = execute_dagster_graphql(
        graphql_context,
        START_SCHEDULES_QUERY,
        variables={"scheduleSelector": schedule_selector},
    )
    assert start_result.data["startSchedule"]["scheduleState"]["status"] == JobStatus.RUNNING.value

    result = execute_dagster_graphql(
        graphql_context,
        GET_SCHEDULE_STATES_QUERY,
        variables={"repositorySelector": repo_selector},
    )

    assert result.data
    assert result.data["scheduleStatesOrError"]
    assert result.data["scheduleStatesOrError"]["__typename"] == "ScheduleStates"

    results = result.data["scheduleStatesOrError"]["results"]
    assert len(results) == len(external_repository.get_external_schedules())

    for schedule_state in results:
        # the comparison must wrap the conditional expression; asserting the
        # bare conditional would make the `else` branch test the truthiness of
        # the enum value, which always passes
        assert schedule_state["status"] == (
            JobStatus.RUNNING.value
            if schedule_state["scheduleName"] == "no_config_pipeline_hourly_schedule"
            else JobStatus.STOPPED.value
        )
def test_sensor_tick_range(graphql_context):
    external_repository = graphql_context.get_repository_location(
        main_repo_location_name()
    ).get_repository(main_repo_name())
    graphql_context.instance.reconcile_scheduler_state(external_repository)

    sensor_name = "always_no_config_sensor"
    external_sensor = external_repository.get_external_sensor(sensor_name)
    sensor_selector = infer_sensor_selector(graphql_context, sensor_name)

    # test with no job state
    result = execute_dagster_graphql(
        graphql_context,
        GET_SENSOR_TICK_RANGE_QUERY,
        variables={"sensorSelector": sensor_selector, "dayRange": None, "dayOffset": None},
    )
    assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 0

    # turn the sensor on
    graphql_context.instance.add_job_state(
        JobState(external_sensor.get_external_origin(), JobType.SENSOR, JobStatus.RUNNING)
    )

    # create ticks at roughly 49, 25, and 1 hours ago
    now = pendulum.now().in_tz("US/Central")
    one = now.subtract(days=2).subtract(hours=1)
    with pendulum.test(one):
        _create_tick(graphql_context.instance)
    two = now.subtract(days=1).subtract(hours=1)
    with pendulum.test(two):
        _create_tick(graphql_context.instance)
    three = now.subtract(hours=1)
    with pendulum.test(three):
        _create_tick(graphql_context.instance)

    # with no range filter, all three ticks come back
    result = execute_dagster_graphql(
        graphql_context,
        GET_SENSOR_TICK_RANGE_QUERY,
        variables={"sensorSelector": sensor_selector, "dayRange": None, "dayOffset": None},
    )
    assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 3

    # dayRange=1 returns only the tick from the last day
    result = execute_dagster_graphql(
        graphql_context,
        GET_SENSOR_TICK_RANGE_QUERY,
        variables={"sensorSelector": sensor_selector, "dayRange": 1, "dayOffset": None},
    )
    assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 1
    assert result.data["sensorOrError"]["sensorState"]["ticks"][0]["timestamp"] == three.timestamp()

    # dayOffset=1 shifts the one-day window back a day
    result = execute_dagster_graphql(
        graphql_context,
        GET_SENSOR_TICK_RANGE_QUERY,
        variables={"sensorSelector": sensor_selector, "dayRange": 1, "dayOffset": 1},
    )
    assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 1
    assert result.data["sensorOrError"]["sensorState"]["ticks"][0]["timestamp"] == two.timestamp()

    # dayRange=2 covers the last two days, catching two of the three ticks
    result = execute_dagster_graphql(
        graphql_context,
        GET_SENSOR_TICK_RANGE_QUERY,
        variables={
            "sensorSelector": sensor_selector,
            "dayRange": 2,
            "dayOffset": None,
        },
    )
    assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 2
def test_get_single_schedule_definition(graphql_context):
    context = graphql_context
    instance = context.instance

    instance.reconcile_scheduler_state(
        external_repository=context.get_repository_location(
            main_repo_location_name()
        ).get_repository(main_repo_name()),
    )

    schedule_selector = infer_schedule_selector(context, "partition_based_multi_mode_decorator")
    result = execute_dagster_graphql(
        context, GET_SCHEDULE_DEFINITION, variables={"scheduleSelector": schedule_selector}
    )

    assert result.data
    assert result.data["scheduleDefinitionOrError"]["__typename"] == "ScheduleDefinition"
    assert result.data["scheduleDefinitionOrError"]["partitionSet"]
    assert not result.data["scheduleDefinitionOrError"]["executionTimezone"]

    future_ticks = result.data["scheduleDefinitionOrError"]["futureTicks"]
    assert future_ticks
    assert len(future_ticks["results"]) == 3

    schedule_selector = infer_schedule_selector(context, "timezone_schedule")

    future_ticks_start_time = pendulum.create(2019, 2, 27, tz="US/Central").timestamp()

    result = execute_dagster_graphql(
        context,
        GET_SCHEDULE_DEFINITION,
        variables={"scheduleSelector": schedule_selector, "ticksAfter": future_ticks_start_time},
    )

    assert result.data
    assert result.data["scheduleDefinitionOrError"]["__typename"] == "ScheduleDefinition"
    assert result.data["scheduleDefinitionOrError"]["executionTimezone"] == "US/Central"

    future_ticks = result.data["scheduleDefinitionOrError"]["futureTicks"]
    assert future_ticks
    assert len(future_ticks["results"]) == 3

    timestamps = [future_tick["timestamp"] for future_tick in future_ticks["results"]]
    assert timestamps == [
        pendulum.create(2019, 2, 27, tz="US/Central").timestamp(),
        pendulum.create(2019, 2, 28, tz="US/Central").timestamp(),
        pendulum.create(2019, 3, 1, tz="US/Central").timestamp(),
    ]

    cursor = future_ticks["cursor"]
    assert future_ticks["cursor"] == pendulum.create(2019, 3, 1, tz="US/Central").timestamp() + 1

    result = execute_dagster_graphql(
        context,
        GET_SCHEDULE_DEFINITION,
        variables={"scheduleSelector": schedule_selector, "ticksAfter": cursor},
    )

    future_ticks = result.data["scheduleDefinitionOrError"]["futureTicks"]
    assert future_ticks
    assert len(future_ticks["results"]) == 3

    timestamps = [future_tick["timestamp"] for future_tick in future_ticks["results"]]
    assert timestamps == [
        pendulum.create(2019, 3, 2, tz="US/Central").timestamp(),
        pendulum.create(2019, 3, 3, tz="US/Central").timestamp(),
        pendulum.create(2019, 3, 4, tz="US/Central").timestamp(),
    ]
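# A hedged sketch of paging through futureTicks with the returned cursor,
# generalizing the two manual pages asserted above (the loop shape, the helper
# name, and the assumption that a null ticksAfter is accepted on the first call
# are illustrative, not confirmed by this suite):
def iterate_future_tick_timestamps(context, schedule_selector, pages=3):
    cursor = None
    for _ in range(pages):
        result = execute_dagster_graphql(
            context,
            GET_SCHEDULE_DEFINITION,
            variables={"scheduleSelector": schedule_selector, "ticksAfter": cursor},
        )
        future_ticks = result.data["scheduleDefinitionOrError"]["futureTicks"]
        yield [tick["timestamp"] for tick in future_ticks["results"]]
        # each page's cursor points just past the last returned tick
        cursor = future_ticks["cursor"]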
@contextmanager  # from contextlib; the function yields, so callers use `with`
def get_main_external_repo(instance):
    with get_main_workspace(instance) as workspace:
        location = workspace.get_repository_location(main_repo_location_name())
        yield location.get_repository(main_repo_name())
def test_sensor_ticks_filtered(graphql_context):
    external_repository = graphql_context.get_repository_location(
        main_repo_location_name()
    ).get_repository(main_repo_name())

    sensor_name = "always_no_config_sensor"
    external_sensor = external_repository.get_external_sensor(sensor_name)
    sensor_selector = infer_sensor_selector(graphql_context, sensor_name)

    # turn the sensor on
    graphql_context.instance.add_instigator_state(
        InstigatorState(
            external_sensor.get_external_origin(),
            InstigatorType.SENSOR,
            InstigatorStatus.RUNNING,
        )
    )

    now = pendulum.now("US/Central")
    with pendulum.test(now):
        _create_tick(graphql_context)  # create a success tick

    # create a started tick
    graphql_context.instance.create_tick(
        TickData(
            instigator_origin_id=external_sensor.get_external_origin().get_id(),
            instigator_name=sensor_name,
            instigator_type=InstigatorType.SENSOR,
            status=TickStatus.STARTED,
            timestamp=now.timestamp(),
            selector_id=external_sensor.selector_id,
        )
    )

    # create a skipped tick
    graphql_context.instance.create_tick(
        TickData(
            instigator_origin_id=external_sensor.get_external_origin().get_id(),
            instigator_name=sensor_name,
            instigator_type=InstigatorType.SENSOR,
            status=TickStatus.SKIPPED,
            timestamp=now.timestamp(),
            selector_id=external_sensor.selector_id,
        )
    )

    # create a failed tick
    graphql_context.instance.create_tick(
        TickData(
            instigator_origin_id=external_sensor.get_external_origin().get_id(),
            instigator_name=sensor_name,
            instigator_type=InstigatorType.SENSOR,
            status=TickStatus.FAILURE,
            timestamp=now.timestamp(),
            error=SerializableErrorInfo(message="foobar", stack=[], cls_name=None, cause=None),
            selector_id=external_sensor.selector_id,
        )
    )

    result = execute_dagster_graphql(
        graphql_context,
        GET_TICKS_QUERY,
        variables={"sensorSelector": sensor_selector},
    )
    assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 4

    result = execute_dagster_graphql(
        graphql_context,
        GET_TICKS_QUERY,
        variables={"sensorSelector": sensor_selector, "statuses": ["STARTED"]},
    )
    assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 1
    assert result.data["sensorOrError"]["sensorState"]["ticks"][0]["status"] == "STARTED"

    result = execute_dagster_graphql(
        graphql_context,
        GET_TICKS_QUERY,
        variables={"sensorSelector": sensor_selector, "statuses": ["FAILURE"]},
    )
    assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 1
    assert result.data["sensorOrError"]["sensorState"]["ticks"][0]["status"] == "FAILURE"

    result = execute_dagster_graphql(
        graphql_context,
        GET_TICKS_QUERY,
        variables={"sensorSelector": sensor_selector, "statuses": ["SKIPPED"]},
    )
    assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 1
    assert result.data["sensorOrError"]["sensorState"]["ticks"][0]["status"] == "SKIPPED"