import json
import os
from datetime import datetime
from math import isclose
from time import sleep

import h5py
import numpy as np
import pytest
from streaming_data_types.status_x5f2 import deserialise_x5f2

# Project-local test helpers are assumed to be importable from the repo's
# helper modules: create_producer, create_consumer, send_writer_command,
# publish_run_start_message, publish_run_stop_message, publish_f142_message,
# publish_ep00_message, consume_everything, poll_everything,
# unix_time_milliseconds, current_unix_time_ms, milliseconds_to_nanoseconds,
# change_pv_value, check, OpenNexusFileWhenAvailable, AlarmStatus,
# AlarmSeverity, EventType.


def test_data_reaches_file(test_environment):
    """
    This 'test' performs the job which NICOS will do in the production system
    at the ESS: sending the 'command' messages for the file writer.

    :param test_environment: The test fixture which launches the containers.
    """
    producer = create_producer()
    sleep(5)
    # Start file writing
    send_writer_command("commands/example-json-command.json", producer)
    producer.flush()
    # Give it some time to accumulate data
    sleep(10)
    # Stop file writing
    send_writer_command("commands/stop-command.json", producer)
    sleep(5)
    send_writer_command("commands/writer-exit.json", producer)
    producer.flush()
    # Allow time for the file writing to complete
    sleep(5)
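

# create_producer, create_consumer and send_writer_command are project-local
# helpers. Below is a minimal sketch of what they plausibly look like, assuming
# confluent_kafka, a broker on localhost:9092 and JSON command files with
# optional "start_time"/"stop_time" fields (all assumptions, not the repo's
# actual code):
from uuid import uuid1

from confluent_kafka import Consumer, Producer


def create_producer(broker="localhost:9092"):
    # Short message timeout so tests fail fast if the broker container is not up
    return Producer({"bootstrap.servers": broker, "message.timeout.ms": 10000})


def create_consumer(broker="localhost:9092"):
    # A unique group id so every test run re-reads the status topics from the start
    return Consumer(
        {
            "bootstrap.servers": broker,
            "group.id": str(uuid1()),
            "auto.offset.reset": "earliest",
        }
    )


def send_writer_command(filepath, producer, topic="TEST_writerCommand",
                        start_time=None, stop_time=None):
    with open(filepath, "r") as f:
        command = json.load(f)
    if start_time is not None:
        command["start_time"] = start_time
    if stop_time is not None:
        command["stop_time"] = stop_time
    producer.produce(topic, json.dumps(command).encode())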


def test_static_data_reaches_file(docker_compose):
    producer = create_producer()
    sleep(10)
    # Start file writing
    job_id = publish_run_start_message(
        producer,
        "commands/nexus_structure_static.json",
        "output_file_static.nxs",
        start_time=int(docker_compose),
    )
    # Give it some time to accumulate data
    sleep(10)
    # Stop file writing
    publish_run_stop_message(producer, job_id=job_id)

    filepath = "output-files/output_file_static.nxs"
    with OpenNexusFileWhenAvailable(filepath) as file:
        assert not file.swmr_mode
        assert file["entry/start_time"][()] == "2016-04-12T02:58:52"
        assert file["entry/end_time"][()] == "2016-04-12T03:29:11"
        assert file["entry/duration"][()] == 1817.0
        assert file["entry/features"][0] == 10138143369737381149
        assert file["entry/user_1/affiliation"][()] == "ISIS, STFC"
        assert np.allclose(
            file["entry/instrument/monitor1/transformations/location"].attrs["vector"],
            np.array([0.0, 0.0, -1.0]),
        )
        assert (
            file["entry/instrument/monitor1/transformations/location"].attrs[
                "transformation_type"
            ]
            == "translation"
        )
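

# OpenNexusFileWhenAvailable is a project-local context manager. A plausible
# sketch (an assumption about its behaviour, not the repo's actual code):
# keep retrying a read-only open until the writer has created the file.
from time import time


class OpenNexusFileWhenAvailable:
    def __init__(self, filepath, timeout_s=60, retry_interval_s=1.0):
        self._filepath = filepath
        self._timeout_s = timeout_s
        self._retry_interval_s = retry_interval_s
        self._file = None

    def __enter__(self):
        deadline = time() + self._timeout_s
        while time() < deadline:
            try:
                self._file = h5py.File(self._filepath, mode="r")
                return self._file
            except OSError:
                # File does not exist yet, or is still locked by the writer
                sleep(self._retry_interval_s)
        raise TimeoutError(f"{self._filepath} did not become readable in time")

    def __exit__(self, exc_type, exc_value, traceback):
        if self._file is not None:
            self._file.close()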


def test_filewriter_can_write_data_when_start_and_stop_time_are_in_the_past(
    docker_compose_stop_command,
):
    producer = create_producer()
    data_topics = ["TEST_historicalData1", "TEST_historicalData2"]
    first_alarm_change_time_ms = 1_560_330_000_050
    second_alarm_change_time_ms = 1_560_330_000_060

    # Publish some data with timestamps in the past (these are from 2019-06-12)
    for data_topic in data_topics:
        for time_in_ms_after_epoch in range(1_560_330_000_000, 1_560_330_000_200):
            if time_in_ms_after_epoch == first_alarm_change_time_ms:
                # EPICS alarm goes into HIGH state
                publish_f142_message(
                    producer,
                    data_topic,
                    time_in_ms_after_epoch,
                    alarm_status=AlarmStatus.HIGH,
                    alarm_severity=AlarmSeverity.MAJOR,
                )
            elif time_in_ms_after_epoch == second_alarm_change_time_ms:
                # EPICS alarm returns to NO_ALARM
                publish_f142_message(
                    producer,
                    data_topic,
                    time_in_ms_after_epoch,
                    alarm_status=AlarmStatus.NO_ALARM,
                    alarm_severity=AlarmSeverity.NO_ALARM,
                )
            else:
                publish_f142_message(producer, data_topic, time_in_ms_after_epoch)
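

# publish_f142_message is project-local. A minimal sketch, assuming the
# streaming_data_types f142 serialiser; the constant value 42 and the default
# source name are placeholders, and the alarm pass-through assumes
# serialise_f142 accepts alarm_status/alarm_severity keywords:
from streaming_data_types.logdata_f142 import serialise_f142


def publish_f142_message(producer, topic, kafka_timestamp_ms,
                         source_name="fw-test-helpers",
                         alarm_status=None, alarm_severity=None):
    # f142 carries a nanosecond timestamp; Kafka message timestamps are in ms
    payload = serialise_f142(
        42,
        source_name,
        kafka_timestamp_ms * 1_000_000,
        alarm_status=alarm_status,
        alarm_severity=alarm_severity,
    )
    producer.produce(topic, payload, timestamp=kafka_timestamp_ms)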


def test_ignores_commands_with_incorrect_id(docker_compose_multiple_instances):
    producer = create_producer()
    sleep(20)
    send_writer_command("commands/add-command-never-ends.json", producer)
    send_writer_command("commands/add-command-never-ends2.json", producer)
    sleep(10)
    send_writer_command("commands/writer-stop-single.json", producer)

    consumer = create_consumer()
    consumer.subscribe(["TEST_writerStatus2"])
    # Poll a few times on the status topic to see if filewriter2 has stopped writing files
    stopped = False
    for i in range(30):
        msg = consumer.poll()
        if msg is not None and not msg.error() and b'"files":{}' in msg.value():
            # filewriter2 is not currently writing a file - the stop command has been processed
            stopped = True
            break
        sleep(1)
    assert stopped

    sleep(5)
    consumer.unsubscribe()
    consumer.subscribe(["TEST_writerStatus1"])
    writer1msg = consumer.poll()
    # Check that filewriter1's job queue is not empty
    assert writer1msg is not None
    assert b'"files":{}' not in writer1msg.value()


def test_filewriter_clears_stop_time(docker_compose_stop_command_does_not_persist):
    producer = create_producer()
    sleep(10)
    topic = "TEST_writerCommand"
    send_writer_command(
        "commands/commandwithstoptime.json",
        producer,
        topic=topic,
        stop_time=str(int(unix_time_milliseconds(datetime.utcnow()))),
    )
    sleep(10)
    send_writer_command("commands/commandwithnostoptime.json", producer, topic=topic)
    sleep(10)

    msgs = poll_everything("TEST_writerStatus")
    stopped = False
    started = False
    for message in msgs:
        message = str(message.value(), encoding="utf-8")
        if (
            '"code":"START"' in message
            and '"job_id":"a8e31c99-8df9-4123-8060-2e009d84a0df"' in message
        ):
            started = True
        if (
            '"code":"CLOSE"' in message
            and '"job_id":"a8e31c99-8df9-4123-8060-2e009d84a0df"' in message
        ):
            stopped = True
    assert started
    assert not stopped
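

# unix_time_milliseconds is a trivial project-local helper; presumably it
# converts a naive UTC datetime into milliseconds since the Unix epoch:
def unix_time_milliseconds(dt):
    return (dt - datetime(1970, 1, 1)).total_seconds() * 1000.0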


def test_ignores_commands_with_incorrect_service_id(docker_compose_multiple_instances):
    producer = create_producer()
    sleep(20)
    service_id_1 = "filewriter1"
    service_id_2 = "filewriter2"
    command_topic = "TEST_writerCommandMultiple"
    job_id = publish_run_start_message(
        producer,
        "commands/nexus_structure.json",
        nexus_filename="output_file_ignores_stop_1.nxs",
        topic=command_topic,
        service_id=service_id_1,
    )
    publish_run_start_message(
        producer,
        "commands/nexus_structure.json",
        nexus_filename="output_file_ignores_stop_2.nxs",
        topic=command_topic,
        service_id=service_id_2,
    )
    sleep(10)
    publish_run_stop_message(
        producer, job_id, topic=command_topic, service_id=service_id_2
    )

    consumer = create_consumer()
    consumer.subscribe(["TEST_writerStatus2"])
    # Poll a few times on the status topic to see if filewriter2 has stopped writing
    stopped = False
    maximum_tries = 30
    for i in range(maximum_tries):
        msg = consumer.poll()
        if msg is None or msg.error():
            continue
        status_info = deserialise_x5f2(msg.value())
        if json.loads(status_info.status_json)["file_being_written"] == "":
            # Filewriter2 is not currently writing a file => stop command has been processed
            stopped = True
            break
        if i == maximum_tries - 1:
            pytest.fail("filewriter2 failed to stop after being sent stop message")
        sleep(1)
    assert stopped

    sleep(5)
    consumer.unsubscribe()
    consumer.subscribe(["TEST_writerStatus1"])
    writer1msg = consumer.poll()
    # Check that filewriter1 is still writing its file
    status_info = deserialise_x5f2(writer1msg.value())
    assert json.loads(status_info.status_json)["file_being_written"] != ""
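

# publish_run_start_message and publish_run_stop_message are project-local. A
# sketch of what they plausibly do, assuming the streaming_data_types pl72/6s4t
# serialisers (the exact keyword names are assumptions based on those schemas):
from uuid import uuid1

from streaming_data_types.run_start_pl72 import serialise_pl72
from streaming_data_types.run_stop_6s4t import serialise_6s4t


def publish_run_start_message(producer, nexus_structure_filepath, nexus_filename,
                              topic="TEST_writerCommand", job_id=None,
                              start_time=None, stop_time=None, service_id=""):
    with open(nexus_structure_filepath, "r") as f:
        nexus_structure = f.read()
    # Generate a fresh job id unless the test pins one down
    job_id = job_id or str(uuid1())
    producer.produce(
        topic,
        serialise_pl72(
            job_id=job_id,
            filename=nexus_filename,
            start_time=start_time,
            stop_time=stop_time,
            nexus_structure=nexus_structure,
            service_id=service_id,
        ),
    )
    return job_id


def publish_run_stop_message(producer, job_id, topic="TEST_writerCommand",
                             stop_time=None, service_id=""):
    producer.produce(
        topic,
        serialise_6s4t(job_id=job_id, stop_time=stop_time, service_id=service_id),
    )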


def test_filewriter_clears_stop_time_between_jobs(docker_compose_stop_command):
    producer = create_producer()
    start_time = unix_time_milliseconds(datetime.utcnow()) - 1000
    stop_time = start_time + 1000

    # Ensure the TEST_sampleEnv topic exists
    publish_f142_message(
        producer, "TEST_sampleEnv", int(unix_time_milliseconds(datetime.utcnow()))
    )
    check(producer.flush(5) == 0, "Unable to flush kafka messages.")

    topic = "TEST_writerCommand"
    publish_run_start_message(
        producer,
        "commands/nexus_structure.json",
        "output_file_with_stop_time.nxs",
        topic=topic,
        job_id="should_start_then_stop",
        start_time=int(start_time),
        stop_time=int(stop_time),
    )
    check(producer.flush(5) == 0, "Unable to flush kafka messages.")
    sleep(30)
    job_id = publish_run_start_message(
        producer,
        "commands/nexus_structure.json",
        "output_file_no_stop_time.nxs",
        topic=topic,
        job_id="should_start_but_not_stop",
    )
    check(producer.flush(5) == 0, "Unable to flush kafka messages.")
    sleep(30)

    msgs = consume_everything("TEST_writerStatus")
    stopped = False
    started = False
    message = msgs[-1]
    status_info = deserialise_x5f2(message.value())
    message = json.loads(status_info.status_json)
    if message["start_time"] > 0 and message["job_id"] == job_id:
        started = True
    if message["stop_time"] == 0 and message["job_id"] == "":
        stopped = True
    assert started
    assert not stopped

    # Clean up by stopping writing
    publish_run_stop_message(producer, job_id=job_id)
    check(producer.flush(5) == 0, "Unable to flush kafka messages.")
    sleep(3)
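

# check, consume_everything and poll_everything are project-local. check
# presumably just asserts with a message. consume_everything is sketched below
# under the same confluent_kafka assumption as create_consumer: drain the topic
# from the beginning and return every message found; poll_everything in the
# older tests is assumed to behave the same way.
def check(condition, message):
    assert condition, message


def consume_everything(topic):
    consumer = create_consumer()
    consumer.subscribe([topic])
    messages = []
    while True:
        msg = consumer.poll(timeout=2.0)
        if msg is None:
            break  # Nothing arrived within the timeout; assume the topic is drained
        if not msg.error():
            messages.append(msg)
    consumer.close()
    return messages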


def test_data_reaches_file(docker_compose):
    producer = create_producer()
    sleep(20)
    # Start file writing
    send_writer_command(
        "commands/example-json-command.json", producer, start_time=docker_compose
    )
    producer.flush()
    # Give it some time to accumulate data
    sleep(10)
    # Stop file writing
    send_writer_command("commands/stop-command.json", producer)
    sleep(10)
    send_writer_command("commands/writer-exit.json", producer)
    sleep(10)
    producer.flush()

    # Allow time for the file writing to complete
    for i in range(100):
        if os.path.isfile("output-files/output_file.nxs"):
            break
        sleep(1)

    file = h5py.File("output-files/output_file.nxs", mode="r")

    # Static checks
    assert not file.swmr_mode
    assert file["entry/start_time"][...] == "2016-04-12T02:58:52"
    assert file["entry/end_time"][...] == "2016-04-12T03:29:11"
    assert file["entry/duration"][...] == 1817.0
    assert file["entry/features"][0] == 10138143369737381149
    assert file["entry/user_1/affiliation"][...] == "ISIS, STFC"
    assert np.allclose(
        file["entry/instrument/monitor1/transformations/location"].attrs["vector"],
        np.array([0.0, 0.0, -1.0]),
    )
    assert (
        file["entry/instrument/monitor1/transformations/location"].attrs[
            "transformation_type"
        ]
        == "translation"
    )

    # Streamed checks
    # ev42 event data (Detector_1)
    assert file["entry/detector_1_events/event_id"][0] == 99406
    assert file["entry/detector_1_events/event_id"][1] == 98345

    # f142 sample env (Sample)
    assert np.isclose(21.0, file["entry/sample/sample_env_logs/Det_Temp_RRB/value"][0])


def test_two_different_writer_modules_with_same_flatbuffer_id(docker_compose):
    producer = create_producer()
    start_time = unix_time_milliseconds(datetime.utcnow()) - 10000
    for i in range(10):
        publish_f142_message(
            producer,
            "TEST_sampleEnv",
            int(start_time + i * 1000),
            source_name="test_source_1",
        )
        publish_f142_message(
            producer,
            "TEST_sampleEnv",
            int(start_time + i * 1000),
            source_name="test_source_2",
        )
    check(producer.flush(5) == 0, "Unable to flush kafka messages.")

    # Start file writing
    publish_run_start_message(
        producer,
        "commands/nexus_structure_multiple_modules.json",
        "output_file_multiple_modules.nxs",
        start_time=int(start_time),
        stop_time=int(start_time + 5 * 1000),
    )
    # Give it some time to accumulate data
    sleep(10)

    filepath = "output-files/output_file_multiple_modules.nxs"
    with OpenNexusFileWhenAvailable(filepath) as file:
        assert (
            len(file["entry/sample/dataset1/time"][:]) > 0
            and len(file["entry/sample/dataset1/value"][:]) > 0
        ), "The f142 module should have written a value and a time for this dataset"
        assert (
            "cue_timestamp_zero" not in file["entry/sample/dataset2"]
        ), "The f142_test module writes cue_index but no cue_timestamp_zero for this dataset"
        assert (
            len(file["entry/sample/dataset2/cue_index"][:]) > 0
        ), "Expected index values, found none."
        for i in range(len(file["entry/sample/dataset2/cue_index"][:])):
            assert (
                file["entry/sample/dataset2/cue_index"][i] == i
            ), "Expected consecutive integers to be written by f142_test"


def test_long_run(docker_compose_long_running):
    producer = create_producer()
    sleep(20)
    # Start file writing
    send_writer_command(
        "commands/longrunning.json",
        producer,
        topic="TEST_writerCommandLR",
        start_time=docker_compose_long_running,
    )
    producer.flush()
    sleep(10)

    # The minimum length of the test is determined by (pv_updates * 3) + 10 seconds
    pv_updates = 6000
    # range() excludes the last number, so use pv_updates + 1 to get 1..pv_updates
    for i in range(1, pv_updates + 1):
        change_pv_value("SIMPLE:DOUBLE", i)
        sleep(3)

    send_writer_command(
        "commands/stop-command-lr.json", producer, topic="TEST_writerCommandLR"
    )
    producer.flush()
    sleep(30)

    # Allow time for the file writing to complete
    for i in range(100):
        if os.path.isfile("output-files/output_file_lr.nxs"):
            break
        sleep(1)

    file = h5py.File("output-files/output_file_lr.nxs", mode="r")
    counter = 1
    # Check the values are contiguous
    for value in file["entry/cont_data/value"]:
        assert isclose(value, counter)
        counter += 1
    # Check that the last value is the same as the number of updates
    assert counter == pv_updates + 1

    with open("logs/lr_status_messages.log", "w+") as log_file:
        status_messages = poll_everything("TEST_writerStatus")
        for msg in status_messages:
            log_file.write(str(msg.value(), encoding="utf-8") + "\n")
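

# change_pv_value is project-local. A sketch assuming pyepics and an IOC
# reachable over channel access from the test host:
import epics


def change_pv_value(pv_name, value):
    # Block until the IOC confirms the write, so updates cannot overtake each other
    epics.caput(pv_name, value, wait=True, timeout=5)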


def test_ep00(docker_compose):
    producer = create_producer()
    topic = "TEST_epicsConnectionStatus"
    sleep(10)
    # Start file writing
    job_id = publish_run_start_message(
        producer,
        "commands/nexus_structure_epics_status.json",
        "output_file_ep00.nxs",
        start_time=current_unix_time_ms(),
    )
    sleep(5)

    first_timestamp = current_unix_time_ms()
    publish_ep00_message(producer, topic, EventType.NEVER_CONNECTED, first_timestamp)
    second_timestamp = current_unix_time_ms()
    publish_ep00_message(
        producer, topic, EventType.CONNECTED, kafka_timestamp=second_timestamp
    )
    # Give it some time to accumulate data
    sleep(10)
    # Stop file writing
    publish_run_stop_message(producer, job_id, stop_time=current_unix_time_ms())

    filepath = "output-files/output_file_ep00.nxs"
    with OpenNexusFileWhenAvailable(filepath) as file:
        assert file["EpicsConnectionStatus/connection_status_time"][
            0
        ] == milliseconds_to_nanoseconds(first_timestamp)
        assert file["EpicsConnectionStatus/connection_status"][0] == b"NEVER_CONNECTED"
        assert file["EpicsConnectionStatus/connection_status_time"][
            1
        ] == milliseconds_to_nanoseconds(second_timestamp)
        assert file["EpicsConnectionStatus/connection_status"][1] == b"CONNECTED"
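

# publish_ep00_message and the time helpers are project-local. A sketch,
# assuming the streaming_data_types ep00 serialiser; the module path, argument
# order and the source name "SIMPLE:DOUBLE" are all assumptions:
from streaming_data_types.epics_connection_info_ep00 import serialise_ep00


def current_unix_time_ms():
    return int(unix_time_milliseconds(datetime.utcnow()))


def milliseconds_to_nanoseconds(time_ms):
    return int(time_ms * 1_000_000)


def publish_ep00_message(producer, topic, event_type, kafka_timestamp=None):
    payload = serialise_ep00(
        milliseconds_to_nanoseconds(kafka_timestamp), event_type, "SIMPLE:DOUBLE"
    )
    producer.produce(topic, payload, timestamp=kafka_timestamp)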


def test_long_run(docker_compose_long_running):
    producer = create_producer()
    sleep(20)
    # Start file writing
    job_id = publish_run_start_message(
        producer,
        "commands/nexus_structure_long_running.json",
        nexus_filename="output_file_lr.nxs",
        topic="TEST_writerCommandLR",
        start_time=int(docker_compose_long_running),
    )
    sleep(10)

    # The minimum length of the test is determined by (pv_updates * 3) + 10 seconds
    pv_updates = 6000
    # range() excludes the last number, so use pv_updates + 1 to get 1..pv_updates
    for i in range(1, pv_updates + 1):
        change_pv_value("SIMPLE:DOUBLE", i)
        sleep(3)

    publish_run_stop_message(producer, job_id=job_id, topic="TEST_writerCommandLR")
    sleep(30)

    filepath = "output-files/output_file_lr.nxs"
    with OpenNexusFileWhenAvailable(filepath) as file:
        counter = 1
        # Check the values are contiguous
        for value in file["entry/cont_data/value"]:
            assert isclose(value, counter)
            counter += 1
        # Check that the last value is the same as the number of updates
        assert counter == pv_updates + 1

    with open("logs/lr_status_messages.log", "w+") as log_file:
        status_messages = consume_everything("TEST_writerStatus")
        for msg in status_messages:
            log_file.write(str(deserialise_x5f2(msg.value())) + "\n")


def test_ignores_commands_with_incorrect_job_id(docker_compose):
    producer = create_producer()
    sleep(10)
    # Ensure the TEST_sampleEnv topic exists
    publish_f142_message(
        producer, "TEST_sampleEnv", int(unix_time_milliseconds(datetime.utcnow()))
    )
    sleep(10)
    # Start file writing
    job_id = publish_run_start_message(
        producer,
        "commands/nexus_structure.json",
        "output_file_jobid.nxs",
        start_time=int(docker_compose),
    )
    sleep(10)
    # Request a stop, but with a slightly wrong job_id
    publish_run_stop_message(producer, job_id[:-1])

    msgs = consume_everything("TEST_writerStatus")
    # Check that the final status message read indicates the writer is still running
    message = msgs[-1]
    status_info = deserialise_x5f2(message.value())
    message = json.loads(status_info.status_json)
    running = message["file_being_written"] != ""
    assert running


def test_static_data_reaches_file(docker_compose):
    producer = create_producer()
    sleep(20)
    # Start file writing
    send_writer_command(
        "commands/static-data-add.json", producer, start_time=docker_compose
    )
    producer.flush()
    # Give it some time to accumulate data
    sleep(10)
    # Stop file writing
    send_writer_command("commands/static-data-stop.json", producer)
    sleep(10)
    send_writer_command("commands/writer-exit.json", producer)
    producer.flush()

    # Allow time for the file writing to complete
    for i in range(100):
        if os.path.isfile("output-files/output_file_static.nxs"):
            break
        sleep(1)

    file = h5py.File("output-files/output_file_static.nxs", mode="r")

    # Static checks
    assert not file.swmr_mode
    assert file["entry/start_time"][()] == "2016-04-12T02:58:52"
    assert file["entry/end_time"][()] == "2016-04-12T03:29:11"
    assert file["entry/duration"][()] == 1817.0
    assert file["entry/features"][0] == 10138143369737381149
    assert file["entry/user_1/affiliation"][()] == "ISIS, STFC"
    assert np.allclose(
        file["entry/instrument/monitor1/transformations/location"].attrs["vector"],
        np.array([0.0, 0.0, -1.0]),
    )
    assert (
        file["entry/instrument/monitor1/transformations/location"].attrs[
            "transformation_type"
        ]
        == "translation"
    )