def test_ignores_commands_with_incorrect_service_id(docker_compose_multiple_instances):
    producer = create_producer()
    sleep(20)
    service_id_1 = "filewriter1"
    service_id_2 = "filewriter2"
    command_topic = "TEST_writerCommandMultiple"
    job_id = publish_run_start_message(
        producer,
        "commands/nexus_structure.json",
        nexus_filename="output_file_ignores_stop_1.nxs",
        topic=command_topic,
        service_id=service_id_1,
    )
    publish_run_start_message(
        producer,
        "commands/nexus_structure.json",
        nexus_filename="output_file_ignores_stop_2.nxs",
        topic=command_topic,
        service_id=service_id_2,
    )

    sleep(10)

    publish_run_stop_message(
        producer, job_id, topic=command_topic, service_id=service_id_2
    )

    consumer = create_consumer()
    consumer.subscribe(["TEST_writerStatus2"])

    # Poll the status topic a few times to see whether filewriter2 has stopped writing.
    stopped = False
    maximum_tries = 30
    for _ in range(maximum_tries):
        msg = consumer.poll()
        if msg is None or msg.error():
            sleep(1)
            continue
        status_info = deserialise_x5f2(msg.value())
        if json.loads(status_info.status_json)["file_being_written"] == "":
            # Filewriter2 is not currently writing a file => stop command has been processed.
            stopped = True
            break
        sleep(1)

    assert stopped, "filewriter2 failed to stop after being sent stop message"

    sleep(5)
    consumer.unsubscribe()
    consumer.subscribe(["TEST_writerStatus1"])
    writer1msg = consumer.poll()

    # Check filewriter1 is still writing its file, i.e. its job was not stopped
    status_info = deserialise_x5f2(writer1msg.value())
    assert json.loads(status_info.status_json)["file_being_written"] != ""
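
# The status-polling pattern above recurs in several of these tests. A minimal
# helper sketch that factors it out, assuming the same create_consumer() and
# deserialise_x5f2() helpers the tests use (the function name is hypothetical):
def wait_until_writer_is_idle(status_topic, maximum_tries=30):
    consumer = create_consumer()
    consumer.subscribe([status_topic])
    for _ in range(maximum_tries):
        msg = consumer.poll()
        if msg is None or msg.error():
            sleep(1)
            continue
        status_info = deserialise_x5f2(msg.value())
        if json.loads(status_info.status_json)["file_being_written"] == "":
            return True  # No file open => the stop command has been processed
        sleep(1)
    return False
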
# Example #2

def test_filewriter_clears_stop_time_between_jobs(docker_compose_stop_command):
    producer = create_producer()
    start_time = unix_time_milliseconds(datetime.utcnow()) - 1000
    stop_time = start_time + 1000
    # Ensure TEST_sampleEnv topic exists
    publish_f142_message(producer, "TEST_sampleEnv",
                         int(unix_time_milliseconds(datetime.utcnow())))
    check(producer.flush(5) == 0, "Unable to flush kafka messages.")

    topic = "TEST_writerCommand"
    publish_run_start_message(
        producer,
        "commands/nexus_structure.json",
        "output_file_with_stop_time.nxs",
        topic=topic,
        job_id="should_start_then_stop",
        start_time=int(start_time),
        stop_time=int(stop_time),
    )
    check(producer.flush(5) == 0, "Unable to flush kafka messages.")
    sleep(30)
    job_id = publish_run_start_message(
        producer,
        "commands/nexus_structure.json",
        "output_file_no_stop_time.nxs",
        topic=topic,
        job_id="should_start_but_not_stop",
    )
    check(producer.flush(5) == 0, "Unable to flush kafka messages.")
    sleep(30)
    msgs = consume_everything("TEST_writerStatus")

    message = msgs[-1]
    status_info = deserialise_x5f2(message.value())
    status = json.loads(status_info.status_json)
    # Started: the writer reports a start time for the job we just submitted
    started = status["start_time"] > 0 and status["job_id"] == job_id
    # Stopped: the writer has gone idle again (job id and stop time cleared)
    stopped = status["stop_time"] == 0 and status["job_id"] == ""

    assert started
    assert not stopped

    # Clean up by stopping writing
    publish_run_stop_message(producer, job_id=job_id)
    check(producer.flush(5) == 0, "Unable to flush kafka messages.")
    sleep(3)
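
# unix_time_milliseconds is used throughout this listing but not defined in it.
# A plausible sketch, assuming the naive UTC datetimes (datetime.utcnow()) the
# tests pass in:
def unix_time_milliseconds(dt):
    return (dt - datetime(1970, 1, 1)).total_seconds() * 1000.0

# Example #3
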
def test_static_data_reaches_file(docker_compose):
    producer = create_producer()
    sleep(10)
    # Start file writing
    job_id = publish_run_start_message(
        producer,
        "commands/nexus_structure_static.json",
        "output_file_static.nxs",
        start_time=int(docker_compose),
    )

    # Give it some time to accumulate data
    sleep(10)
    # Stop file writing
    publish_run_stop_message(producer, job_id=job_id)

    filepath = "output-files/output_file_static.nxs"
    with OpenNexusFileWhenAvailable(filepath) as file:
        assert not file.swmr_mode
        assert file["entry/start_time"][()] == "2016-04-12T02:58:52"
        assert file["entry/end_time"][()] == "2016-04-12T03:29:11"
        assert file["entry/duration"][()] == 1817.0
        assert file["entry/features"][0] == 10138143369737381149
        assert file["entry/user_1/affiliation"][()] == "ISIS, STFC"
        assert np.allclose(
            file["entry/instrument/monitor1/transformations/location"].attrs["vector"],
            np.array([0.0, 0.0, -1.0]),
        )
        assert (
            file["entry/instrument/monitor1/transformations/location"].attrs[
                "transformation_type"
            ]
            == "translation"
        )
# Example #4

def test_two_different_writer_modules_with_same_flatbuffer_id(docker_compose):
    producer = create_producer()
    start_time = unix_time_milliseconds(datetime.utcnow()) - 10000
    for i in range(10):
        publish_f142_message(
            producer,
            "TEST_sampleEnv",
            int(start_time + i * 1000),
            source_name="test_source_1",
        )
        publish_f142_message(
            producer,
            "TEST_sampleEnv",
            int(start_time + i * 1000),
            source_name="test_source_2",
        )
    check(producer.flush(5) == 0, "Unable to flush kafka messages.")
    # Start file writing
    publish_run_start_message(
        producer,
        "commands/nexus_structure_multiple_modules.json",
        "output_file_multiple_modules.nxs",
        start_time=int(start_time),
        stop_time=int(start_time + 5 * 1000),
    )
    # Give it some time to accumulate data
    sleep(10)

    filepath = "output-files/output_file_multiple_modules.nxs"
    with OpenNexusFileWhenAvailable(filepath) as file:
        assert (
            len(file["entry/sample/dataset1/time"][:]) > 0
            and len(file["entry/sample/dataset1/value"][:]) > 0
        ), "The f142 module should have written both value and time for this dataset"

        assert (
            "cue_timestamp_zero" not in file["entry/sample/dataset2"]
        ), "The f142_test module should have written this dataset; it writes cue_index but no cue_timestamp_zero"
        assert (
            len(file["entry/sample/dataset2/cue_index"][:]) > 0
        ), "Expected index values, found none."
        for i in range(len(file["entry/sample/dataset2/cue_index"][:])):
            assert (
                file["entry/sample/dataset2/cue_index"][i] == i
            ), "Expected consecutive integers to be written by f142_test"
# Example #5

def test_ep00(docker_compose):
    producer = create_producer()
    topic = "TEST_epicsConnectionStatus"
    sleep(10)

    # Start file writing
    job_id = publish_run_start_message(
        producer,
        "commands/nexus_structure_epics_status.json",
        "output_file_ep00.nxs",
        start_time=current_unix_time_ms(),
    )
    sleep(5)
    first_timestamp = current_unix_time_ms()
    publish_ep00_message(producer, topic, EventType.NEVER_CONNECTED,
                         first_timestamp)
    second_timestamp = current_unix_time_ms()
    publish_ep00_message(producer,
                         topic,
                         EventType.CONNECTED,
                         kafka_timestamp=second_timestamp)

    # Give it some time to accumulate data
    sleep(10)

    # Stop file writing
    publish_run_stop_message(producer,
                             job_id,
                             stop_time=current_unix_time_ms())

    filepath = "output-files/output_file_ep00.nxs"
    with OpenNexusFileWhenAvailable(filepath) as file:
        assert file["EpicsConnectionStatus/connection_status_time"][
            0] == milliseconds_to_nanoseconds(first_timestamp)
        assert file["EpicsConnectionStatus/connection_status"][
            0] == b"NEVER_CONNECTED"
        assert file["EpicsConnectionStatus/connection_status_time"][
            1] == milliseconds_to_nanoseconds(second_timestamp)
        assert file["EpicsConnectionStatus/connection_status"][
            1] == b"CONNECTED"
def test_long_run(docker_compose_long_running):
    producer = create_producer()
    sleep(20)
    # Start file writing
    job_id = publish_run_start_message(
        producer,
        "commands/nexus_structure_long_running.json",
        nexus_filename="output_file_lr.nxs",
        topic="TEST_writerCommandLR",
        start_time=int(docker_compose_long_running),
    )
    sleep(10)
    # Minimum length of the test is determined by (pv_updates * 3) + 10 seconds
    pv_updates = 6000
    # range() excludes the stop value, so pv_updates + 1 gives 1 to pv_updates inclusive
    for i in range(1, pv_updates + 1):
        change_pv_value("SIMPLE:DOUBLE", i)
        sleep(3)

    publish_run_stop_message(producer,
                             job_id=job_id,
                             topic="TEST_writerCommandLR")
    sleep(30)

    filepath = "output-files/output_file_lr.nxs"
    with OpenNexusFileWhenAvailable(filepath) as file:
        counter = 1
        # Check the written values are consecutive
        for value in file["entry/cont_data/value"]:
            assert isclose(value, counter)
            counter += 1

    # Check that the number of values written equals the number of PV updates
    assert counter == pv_updates + 1

    with open("logs/lr_status_messages.log", "w+") as file:
        status_messages = consume_everything("TEST_writerStatus")
        for msg in status_messages:
            file.write(str(deserialise_x5f2(msg.value())) + "\n")
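
# change_pv_value pokes the EPICS IOC that feeds the forwarder; it is not
# defined in this listing. A minimal sketch using pyepics (the real helper may
# use a different EPICS client or shell out to caput):
from epics import caput

def change_pv_value_sketch(pv_name, value):
    caput(pv_name, value, wait=True)  # wait=True blocks until the put completes

# Example #7
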
def test_ignores_commands_with_incorrect_job_id(docker_compose):
    producer = create_producer()
    sleep(10)

    # Ensure TEST_sampleEnv topic exists
    publish_f142_message(
        producer, "TEST_sampleEnv", int(unix_time_milliseconds(datetime.utcnow()))
    )

    sleep(10)

    # Start file writing
    job_id = publish_run_start_message(
        producer,
        "commands/nexus_structure.json",
        "output_file_jobid.nxs",
        start_time=int(docker_compose),
    )

    sleep(10)

    # Request stop but with slightly wrong job_id
    publish_run_stop_message(producer, job_id[:-1])

    msgs = consume_everything("TEST_writerStatus")

    # Check the last message read from the status topic indicates that the
    # file writer is still running
    message = msgs[-1]
    status_info = deserialise_x5f2(message.value())
    status = json.loads(status_info.status_json)
    running = status["file_being_written"] != ""

    assert running
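
# consume_everything drains a topic from the earliest offset and returns every
# message read; it is not defined in this listing. A minimal sketch with
# confluent-kafka (the broker address, group id and timeout are illustrative):
from confluent_kafka import Consumer

def consume_everything_sketch(topic):
    consumer = Consumer({
        "bootstrap.servers": "localhost:9092",
        "group.id": "consume_everything",
        "auto.offset.reset": "earliest",
    })
    consumer.subscribe([topic])
    messages = []
    while True:
        msg = consumer.poll(timeout=2.0)
        if msg is None:
            break  # Nothing new within the timeout => topic is drained
        if not msg.error():
            messages.append(msg)
    consumer.close()
    return messages
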
# Example #8 (the beginning of this example is missing from the source)

                    alarm_severity=AlarmSeverity.NO_ALARM,
                )
            else:
                publish_f142_message(producer, data_topic,
                                     time_in_ms_after_epoch)
    check(producer.flush(5) == 0, "Unable to flush kafka messages.")
    sleep(5)

    command_topic = "TEST_writerCommand"
    start_time = 1_560_330_000_002
    stop_time = 1_560_330_000_148
    # Ask to write 147 messages from the middle of the 200 messages we published
    publish_run_start_message(
        producer,
        "commands/nexus_structure_historical.json",
        "output_file_of_historical_data.nxs",
        start_time=start_time,
        stop_time=stop_time,
        topic=command_topic,
    )

    sleep(20)
    # The command also includes a stream for topic TEST_emptyTopic, which exists but contains
    # no data; the file writer should recognise this and close the corresponding streamer cleanly.
    filepath = "output-files/output_file_of_historical_data.nxs"
    with OpenNexusFileWhenAvailable(filepath) as file:
        # Expect one value per ms between the start and stop time: the range is
        # inclusive of both endpoints (+1), and one message is also written
        # before the start and one after the stop (+2), hence +3
        assert file["entry/historical_data_1/time"].len() == (
            stop_time - start_time + 3
        ), "Expected one message per millisecond between the specified start and stop time"
        assert file["entry/historical_data_2/time"].len() == (