Example #1
def hard_stop(self, timeout: Optional[float] = None) -> None:
    if self._fifo_read_producer is not None:
        self._fifo_read_producer.hard_stop(timeout=timeout)
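    # drain any leftover simulated wire_out data so no queue is left non-empty at teardown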
     if "wire_outs" in self._simulated_response_queues:
         wire_outs = self._simulated_response_queues["wire_outs"]
         for wire_out_queue in wire_outs.values():
             drain_queue(wire_out_queue)
Example #2
def fixture_runnable_mantarray_mc_simulator():
    testing_queue = MPQueue()
    error_queue = MPQueue()
    input_queue = MPQueue()
    output_queue = MPQueue()
    simulator = MantarrayMcSimulator(
        input_queue,
        output_queue,
        error_queue,
        testing_queue,
        read_timeout_seconds=QUEUE_CHECK_TIMEOUT_SECONDS,
    )

    items_dict = {
        "input_queue": input_queue,
        "output_queue": output_queue,
        "error_queue": error_queue,
        "testing_queue": testing_queue,
        "simulator": simulator,
    }
    yield items_dict

    simulator.stop()
    # Tanner (2/25/21): Remove any data packets remaining in read queue. This is faster than hard_stop which will attempt to drain every queue
    drain_queue(output_queue)

    # only join if the process has actually been started. Sometimes a test will fail before this happens, in which case join will raise an error
    if simulator.is_alive():
        simulator.join()
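As written, the fixture function above has no pytest registration attached. A minimal sketch of how it could be exposed, assuming the suite follows the common fixture_-prefix convention (the fixture name and scope here are assumptions, not taken from the original):

import pytest  # assumed available in the test suite

@pytest.fixture(scope="function", name="runnable_mantarray_mc_simulator")  # name and scope are assumptions
def runnable_mantarray_mc_simulator_fixture():
    # delegate to the generator above so both setup and teardown run
    yield from fixture_runnable_mantarray_mc_simulator()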
Example #3
def test_OkCommunicationProcess_teardown_after_loop__can_teardown_while_managed_acquisition_is_running_with_simulator(
    running_process_with_simulated_board,
    mocker,
):
    simulator = RunningFIFOSimulator()
    running_process_items = running_process_with_simulated_board(simulator)
    ok_process = running_process_items["ok_process"]
    input_queue = running_process_items["board_queues"][0][0]
    comm_to_main_queue = running_process_items["board_queues"][0][1]

    ok_process.pause()  # pause so it can be asserted that both commands populate ok_comm's input queue
    input_queue.put_nowait({
        "communication_type": "debug_console",
        "command": "initialize_board",
        "bit_file_name": None,
    })
    input_queue.put_nowait(
        get_mutable_copy_of_START_MANAGED_ACQUISITION_COMMUNICATION())
    confirm_queue_is_eventually_of_size(input_queue, 2)
    ok_process.resume()
    ok_process.soft_stop()
    confirm_parallelism_is_stopped(
        ok_process,
        timeout_seconds=10,
    )

    # drain the queue to avoid broken pipe errors
    drain_queue(
        comm_to_main_queue,
        timeout_seconds=QUEUE_CHECK_TIMEOUT_SECONDS,
    )
Example #4
def set_connection_and_register_simulator(
    mc_process_fixture,
    simulator_fixture,
):
    """Send a single status beacon in order to register magic word.

    Sets connection on board index 0.
    """
    mc_process = mc_process_fixture["mc_process"]
    output_queue = mc_process_fixture["board_queues"][0][1]
    simulator = simulator_fixture["simulator"]
    testing_queue = simulator_fixture["testing_queue"]

    num_iterations = 1
    if not isinstance(simulator, MantarrayMcSimulatorNoBeacons):
        # first iteration to send possibly truncated beacon
        invoke_process_run_and_check_errors(simulator)
        num_iterations += 1  # Tanner (4/6/21): May need to run two iterations in case the first beacon is not truncated. Not doing this will cause issues with output_queue later on
    # send single non-truncated beacon and then register with mc_process
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        {"command": "send_single_beacon"}, testing_queue)
    invoke_process_run_and_check_errors(simulator)
    mc_process.set_board_connection(0, simulator)
    invoke_process_run_and_check_errors(mc_process,
                                        num_iterations=num_iterations)
    # remove status code log message(s)
    drain_queue(output_queue)
Example #5
def test_DataAnalyzerProcess_beta_1_performance__fill_data_analysis_buffer(
    runnable_four_board_analyzer_process,
):
    # 11 seconds of data (625 Hz) coming in from File Writer and going through to Main
    #
    # mantarray-waveform-analysis v0.3:     4.148136512
    # mantarray-waveform-analysis v0.3.1:   3.829136133
    # mantarray-waveform-analysis v0.4.0:   3.323093677
    # remove concatenate:                   2.966678695
    # 30 Hz Bessel filter:                  2.930061808  # Tanner (9/3/20): not intended to speed anything up, just adding this to show it didn't have much effect on performance
    # 30 Hz Butterworth filter:             2.935009033  # Tanner (9/10/20): not intended to speed anything up, just adding this to show it didn't have much effect on performance
    #
    # added twitch metric analysis:         3.013469479
    # initial pulse3D import:               3.855403546
    # pulse3D 0.23.3:                       3.890723909

    p, board_queues, comm_from_main_queue, comm_to_main_queue, _ = runnable_four_board_analyzer_process
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        get_mutable_copy_of_START_MANAGED_ACQUISITION_COMMUNICATION(),
        comm_from_main_queue,
    )
    invoke_process_run_and_check_errors(p, perform_setup_before_loop=True)

    num_seconds = MIN_NUM_SECONDS_NEEDED_FOR_ANALYSIS + 1
    fill_da_input_data_queue(board_queues[0][0], num_seconds)
    start = time.perf_counter_ns()
    invoke_process_run_and_check_errors(p,
                                        num_iterations=num_seconds * (24 + 6))
    dur_seconds = (time.perf_counter_ns() - start) / 10**9

    # prevent BrokenPipeErrors
    drain_queue(board_queues[0][1])
    drain_queue(comm_to_main_queue)

    # print(f"Duration (seconds): {dur_seconds}")  # Eli (4/8/21): this is commented code that is deliberately kept in the codebase since it is often toggled on/off during optimization
    assert dur_seconds < 10
Example #6
    def _check_and_handle_data_analyzer_to_main_queue(self) -> None:
        process_manager = self._process_manager

        data_analyzer_to_main = (
            process_manager.queue_container().get_communication_queue_from_data_analyzer_to_main()
        )
        try:
            communication = data_analyzer_to_main.get(
                timeout=SECONDS_TO_WAIT_WHEN_POLLING_QUEUES)
        except queue.Empty:
            return

        # Eli (2/12/20) is not sure how to test that a lock is being acquired...so be careful about refactoring this
        msg = f"Communication from the Data Analyzer: {communication}"
        with self._lock:
            logger.info(msg)

        communication_type = communication["communication_type"]
        if communication_type == "data_available":
            if self._values_to_share_to_server["system_status"] == BUFFERING_STATE:
                self._data_dump_buffer_size += 1
                if self._data_dump_buffer_size == 2:
                    self._values_to_share_to_server["system_status"] = LIVE_VIEW_ACTIVE_STATE
        elif communication_type == "acquisition_manager":
            if communication["command"] == "stop_managed_acquisition":
                # remove any leftover outgoing items
                queue_container = self._process_manager.queue_container()
                da_data_out_queue = queue_container.get_data_analyzer_board_queues()[0][1]
                drain_queue(da_data_out_queue)
Example #7
def test_DataAnalyzerProcess_beta_2_performance__fill_data_analysis_buffer(
    runnable_four_board_analyzer_process,
):
    # 11 seconds of data (100 Hz) coming in from File Writer and going through to Main
    #
    # initial pulse3D import:                             1.662150824
    # pulse3D 0.23.3:                                     1.680566285

    p, board_queues, comm_from_main_queue, comm_to_main_queue, _ = runnable_four_board_analyzer_process
    p._beta_2_mode = True
    p.change_magnetometer_config(
        {
            "magnetometer_config": DEFAULT_MAGNETOMETER_CONFIG,
            "sampling_period": DEFAULT_SAMPLING_PERIOD,
        }
    )

    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        dict(START_MANAGED_ACQUISITION_COMMUNICATION),
        comm_from_main_queue,
    )
    invoke_process_run_and_check_errors(p, perform_setup_before_loop=True)

    num_seconds = MIN_NUM_SECONDS_NEEDED_FOR_ANALYSIS + 1
    fill_da_input_data_queue(board_queues[0][0], num_seconds)
    start = time.perf_counter_ns()
    invoke_process_run_and_check_errors(p, num_iterations=num_seconds)
    dur_seconds = (time.perf_counter_ns() - start) / 10**9

    # prevent BrokenPipeErrors
    drain_queue(board_queues[0][1])
    drain_queue(comm_to_main_queue)

    # print(f"Duration (seconds): {dur_seconds}")  # Eli (4/8/21): this is commented code that is deliberately kept in the codebase since it is often toggled on/off during optimization
    assert dur_seconds < 10
Example #8
def test_DataAnalyzerProcess_beta_1_performance__single_data_packet_per_well_without_analysis(
    runnable_four_board_analyzer_process,
):
    # 1 second of data (625 Hz) coming in from File Writer and going through to Main
    #
    # start:                                0.530731389
    # added twitch metric analysis:         0.578328276
    # initial pulse3D import:               0.533860423
    # pulse3D 0.23.3:                       0.539447351

    p, board_queues, comm_from_main_queue, comm_to_main_queue, _ = runnable_four_board_analyzer_process
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        get_mutable_copy_of_START_MANAGED_ACQUISITION_COMMUNICATION(),
        comm_from_main_queue,
    )
    invoke_process_run_and_check_errors(p, perform_setup_before_loop=True)

    num_seconds = 1
    fill_da_input_data_queue(board_queues[0][0], num_seconds)
    start = time.perf_counter_ns()
    invoke_process_run_and_check_errors(p,
                                        num_iterations=num_seconds * (24 + 6))
    dur_seconds = (time.perf_counter_ns() - start) / 10**9

    # prevent BrokenPipeErrors
    drain_queue(board_queues[0][1])
    drain_queue(comm_to_main_queue)

    # print(f"Duration (seconds): {dur_seconds}")  # Eli (4/8/21): this is commented code that is deliberately kept in the codebase since it is often toggled on/off during optimization
    assert dur_seconds < 2
Example #9
def test_DataAnalyzerProcess_beta_1_performance__first_second_of_data_with_analysis(
    runnable_four_board_analyzer_process,
):
    # Fill data analysis buffer with 10 seconds of data to start metric analysis,
    # Then record duration of sending 1 additional second of data
    #
    # start:                                0.547285524
    # initial pulse3D import:               0.535316489
    # pulse3D 0.23.3:                       0.535428579

    p, board_queues, comm_from_main_queue, comm_to_main_queue, _ = runnable_four_board_analyzer_process
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        get_mutable_copy_of_START_MANAGED_ACQUISITION_COMMUNICATION(),
        comm_from_main_queue,
    )
    invoke_process_run_and_check_errors(p, perform_setup_before_loop=True)

    # load data
    num_seconds = MIN_NUM_SECONDS_NEEDED_FOR_ANALYSIS + 1
    fill_da_input_data_queue(board_queues[0][0], num_seconds)
    invoke_process_run_and_check_errors(
        p, num_iterations=MIN_NUM_SECONDS_NEEDED_FOR_ANALYSIS * (24 + 6))

    # send additional data and time analysis
    start = time.perf_counter_ns()
    invoke_process_run_and_check_errors(p, num_iterations=(24 + 6))
    dur_seconds = (time.perf_counter_ns() - start) / 10**9

    # prevent BrokenPipeErrors
    drain_queue(board_queues[0][1])
    drain_queue(comm_to_main_queue)

    # print(f"Duration (seconds): {dur_seconds}")  # Eli (4/8/21): this is commented code that is deliberately kept in the codebase since it is often toggled on/off during optimization
    assert dur_seconds < 2
Example #10
def test_FileWriterProcess__does_not_include_recording_metrics_in_performance_metrics_when_not_recording(
    test_data_packet, test_description, four_board_file_writer_process, mocker
):
    file_writer_process = four_board_file_writer_process["fw_process"]
    to_main_queue = four_board_file_writer_process["to_main_queue"]
    incoming_data_queue = four_board_file_writer_process["board_queues"][0][0]

    if test_data_packet == SIMPLE_BETA_2_CONSTRUCT_DATA_FROM_ALL_WELLS:
        file_writer_process.set_beta_2_mode()

    # add data packets
    num_packets_to_send = 3  # send arbitrary number of packets
    for _ in range(num_packets_to_send):
        incoming_data_queue.put_nowait(test_data_packet)
    confirm_queue_is_eventually_of_size(incoming_data_queue, num_packets_to_send)
    # set to 0 to speed up test
    file_writer_process._minimum_iteration_duration_seconds = 0  # pylint: disable=protected-access
    # get performance metrics dict
    invoke_process_run_and_check_errors(
        file_writer_process,
        num_iterations=FILE_WRITER_PERFOMANCE_LOGGING_NUM_CYCLES,
        perform_setup_before_loop=True,
    )
    confirm_queue_is_eventually_of_size(to_main_queue, 1)
    actual = to_main_queue.get(timeout=QUEUE_CHECK_TIMEOUT_SECONDS)
    actual = actual["message"]
    # make sure recording metrics not present
    assert "num_recorded_data_points_metrics" not in actual
    assert "recording_duration_metrics" not in actual

    # Tanner (6/1/21): avoid BrokenPipeErrors
    drain_queue(four_board_file_writer_process["board_queues"][0][1])
Example #11
def test_drain_queue__used_default_queue_get_timeout_if_timeout_seconds_not_given(
    mocker,
):
    q = Queue()
    q.put("item")
    spied_get = mocker.spy(q, "get")
    drain_queue(q)
    assert spied_get.call_args[1]["timeout"] == QUEUE_CHECK_TIMEOUT_SECONDS
Example #12
def test_drain_queue__calls_queue_get_correctly(mocker):
    q = Queue()
    q.put(1)
    spied_get = mocker.spy(q, "get")

    expected_timeout = 5.9
    drain_queue(q, timeout_seconds=expected_timeout)
    assert spied_get.call_args[1]["timeout"] == expected_timeout
    assert spied_get.call_args[1]["block"] is True
Example #13
def test_McCommunicationProcess_teardown_after_loop__handles_fatal_instrument_error(
    patch_print,
    four_board_mc_comm_process_no_handshake,
    mantarray_mc_simulator_no_beacon,
    mocker,
):
    mc_process = four_board_mc_comm_process_no_handshake["mc_process"]
    output_queue = four_board_mc_comm_process_no_handshake["board_queues"][0][1]
    simulator = mantarray_mc_simulator_no_beacon["simulator"]
    testing_queue = mantarray_mc_simulator_no_beacon["testing_queue"]
    set_connection_and_register_simulator(
        four_board_mc_comm_process_no_handshake, mantarray_mc_simulator_no_beacon
    )

    mocked_write = mocker.patch.object(simulator, "write", autospec=True)

    # put simulator in error state before sending beacon
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        {
            "command": "set_status_code",
            "status_code": SERIAL_COMM_FATAL_ERROR_CODE,
        },
        testing_queue,
    )
    invoke_process_run_and_check_errors(simulator)
    # add one beacon for mc_process to read normally
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        {"command": "send_single_beacon"},
        testing_queue,
    )
    invoke_process_run_and_check_errors(simulator)
    # add read bytes to flush from simulator
    test_read_bytes = [
        bytes(SERIAL_COMM_MAX_PACKET_LENGTH_BYTES),
        bytes(SERIAL_COMM_MAX_PACKET_LENGTH_BYTES),
        bytes(SERIAL_COMM_MAX_PACKET_LENGTH_BYTES // 2),  # arbitrary final length
    ]
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        {
            "command": "add_read_bytes",
            "read_bytes": test_read_bytes,
        },
        testing_queue,
    )
    invoke_process_run_and_check_errors(simulator)
    # read beacon, raise error, then flush remaining serial data
    with pytest.raises(MantarrayInstrumentError):
        invoke_process_run_and_check_errors(
            mc_process,
            perform_teardown_after_loop=True,
        )
    # check that all data was flushed here
    assert simulator.in_waiting == 0
    # check that no commands were sent
    mocked_write.assert_not_called()
    drain_queue(output_queue)
Example #14
def _drain_board_queues(
    board: Tuple[Queue[Any],  # pylint: disable=unsubscriptable-object
                 Queue[Any],  # pylint: disable=unsubscriptable-object
                 Queue[Any],  # pylint: disable=unsubscriptable-object
                 ],
) -> Dict[str, List[Any]]:
    board_dict = dict()
    board_dict["main_to_instrument_comm"] = drain_queue(board[0])
    board_dict["instrument_comm_to_main"] = drain_queue(board[1])
    board_dict["instrument_comm_to_file_writer"] = drain_queue(board[2])
    return board_dict
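A hypothetical call site for the helper above; the queue contents are illustrative, and _drain_board_queues is assumed to be importable alongside drain_queue as in the rest of these examples:

from queue import Queue

main_to_comm: Queue = Queue()
comm_to_main: Queue = Queue()
comm_to_fw: Queue = Queue()
comm_to_main.put("leftover status message")

remaining = _drain_board_queues((main_to_comm, comm_to_main, comm_to_fw))
# remaining["instrument_comm_to_main"] == ["leftover status message"]; the other two lists are empty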
Example #15
def test_set_up_socketio_handlers__sets_up_socketio_events_correctly(
        mocker, fsio_test_client_creator):
    mocked_start_bg_task = mocker.patch.object(main.socketio,
                                               "start_background_task",
                                               autospec=True)

    test_queue = TestingQueue()

    data_sender = main._set_up_socketio_handlers(test_queue)

    test_clients = []
    try:
        # make sure background thread is started correctly after first connection
        test_clients.append(
            fsio_test_client_creator(main.socketio, main.flask_app))
        mocked_start_bg_task.assert_called_once_with(data_sender)
        # make sure background thread is not restarted after second connection
        test_clients.append(
            fsio_test_client_creator(main.socketio, main.flask_app))
        mocked_start_bg_task.assert_called_once_with(data_sender)
    finally:
        # Tanner (1/18/22): wrap in finally block so that clients are disconnected even if the test fails
        for client in test_clients:
            if client.connected:
                client.disconnect()
    # make sure tombstone message only sent once
    assert drain_queue(test_queue) == [{"data_type": "tombstone"}]
Example #16
def test_FileWriterProcess_teardown_after_loop__can_teardown_process_while_recording__and_log_stop_recording_message(
    test_start_recording_command,
    test_description,
    running_four_board_file_writer_process,
    mocker,
    patch_print,
):
    fw_process = running_four_board_file_writer_process["fw_process"]
    to_main_queue = running_four_board_file_writer_process["to_main_queue"]
    from_main_queue = running_four_board_file_writer_process["from_main_queue"]

    if test_start_recording_command == GENERIC_BETA_2_START_RECORDING_COMMAND:
        fw_process.set_beta_2_mode()

    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        test_start_recording_command, from_main_queue
    )

    fw_process.soft_stop()
    fw_process.join()

    queue_items = drain_queue(to_main_queue)

    actual = queue_items[-1]
    assert (
        actual["message"]
        == "Data is still be written to file. Stopping recording and closing files to complete teardown"
    )
Example #17
def test_McCommunicationProcess__sets_default_magnetometer_config_after_instrument_initially_reaches_idle_ready_state__and_sends_default_config_process_monitor__if_setup_before_loop_was_performed(
        four_board_mc_comm_process_no_handshake, mantarray_mc_simulator,
        mocker):
    mc_process = four_board_mc_comm_process_no_handshake["mc_process"]
    output_queue = four_board_mc_comm_process_no_handshake["board_queues"][0][
        1]
    simulator = mantarray_mc_simulator["simulator"]
    testing_queue = mantarray_mc_simulator["testing_queue"]
    set_connection_and_register_simulator(
        four_board_mc_comm_process_no_handshake, mantarray_mc_simulator)

    mocker.patch.object(  # Tanner (4/6/21): Need to prevent automatic beacons without interrupting the beacons sent after status code updates
        mc_simulator,
        "_get_secs_since_last_status_beacon",
        return_value=0,
        autospec=True,
    )
    mocker.patch.object(  # Tanner (5/22/21): performing set up before loop means that mc_comm will try to start the simulator process which will slow this test down
        simulator,
        "start",
        autospec=True)

    invoke_process_run_and_check_errors(mc_process,
                                        perform_setup_before_loop=True)

    # put simulator in time sync ready status and send beacon
    test_commands = [
        {
            "command": "set_status_code",
            "status_code": SERIAL_COMM_TIME_SYNC_READY_CODE
        },
        {
            "command": "send_single_beacon"
        },
    ]
    handle_putting_multiple_objects_into_empty_queue(test_commands,
                                                     testing_queue)

    invoke_process_run_and_check_errors(simulator, num_iterations=2)
    # read status beacon and send time sync command
    invoke_process_run_and_check_errors(mc_process)
    # process command and switch to idle ready state
    invoke_process_run_and_check_errors(simulator)

    # run mc_process 3 times to process set time command response, process barcode comm, then trigger automatic setting of default magnetometer config
    invoke_process_run_and_check_errors(mc_process, num_iterations=3)
    # process change magnetometer config command response
    invoke_process_run_and_check_errors(simulator)
    # send config to main
    invoke_process_run_and_check_errors(mc_process)
    # check that config was sent to main
    to_main_items = drain_queue(output_queue)
    comm_to_main = to_main_items[-1]
    assert comm_to_main["communication_type"] == "default_magnetometer_config"
    assert comm_to_main["magnetometer_config_dict"] == {
        "sampling_period": DEFAULT_SAMPLING_PERIOD,
        "magnetometer_config": DEFAULT_MAGNETOMETER_CONFIG,
    }
Example #18
def test_McCommunicationProcess__requests_metadata_from_instrument_after_it_initially_reaches_idle_ready_state__if_setup_before_loop_was_performed(
        four_board_mc_comm_process_no_handshake, mantarray_mc_simulator,
        mocker):
    mc_process = four_board_mc_comm_process_no_handshake["mc_process"]
    output_queue = four_board_mc_comm_process_no_handshake["board_queues"][0][
        1]
    simulator = mantarray_mc_simulator["simulator"]
    testing_queue = mantarray_mc_simulator["testing_queue"]
    set_connection_and_register_simulator(
        four_board_mc_comm_process_no_handshake, mantarray_mc_simulator)

    mocker.patch.object(  # Tanner (4/6/21): Need to prevent automatic beacons without interrupting the beacons sent after status code updates
        mc_simulator,
        "_get_secs_since_last_status_beacon",
        return_value=0,
        autospec=True,
    )
    mocker.patch.object(  # Tanner (5/22/21): performing set up before loop means that mc_comm will try to start the simulator process which will slow this test down
        simulator,
        "start",
        autospec=True)

    invoke_process_run_and_check_errors(mc_process,
                                        perform_setup_before_loop=True)
    # setting this value to False so as to not have unrelated queue messages interfere with this test
    mc_process._auto_set_magnetometer_config = False  # pylint: disable=protected-access

    # put simulator in time sync ready status and send beacon
    test_commands = [
        {
            "command": "set_status_code",
            "status_code": SERIAL_COMM_TIME_SYNC_READY_CODE
        },
        {
            "command": "send_single_beacon"
        },
    ]
    handle_putting_multiple_objects_into_empty_queue(test_commands,
                                                     testing_queue)

    invoke_process_run_and_check_errors(simulator, num_iterations=2)
    # read status beacon and send time sync command
    invoke_process_run_and_check_errors(mc_process)
    # process command and switch to idle ready state
    invoke_process_run_and_check_errors(simulator)

    # run mc_process 3 times to process set time command response, process barcode comm, then trigger automatic collection of metadata
    invoke_process_run_and_check_errors(mc_process, num_iterations=3)
    # process get metadata command
    invoke_process_run_and_check_errors(simulator)
    # send metadata to main
    invoke_process_run_and_check_errors(mc_process)
    # check that metadata was sent to main
    to_main_items = drain_queue(output_queue)
    metadata_comm = to_main_items[-1]
    assert metadata_comm["communication_type"] == "metadata_comm"
    assert metadata_comm["metadata"] == MantarrayMcSimulator.default_metadata_values
Example #19
def test_FileWriterProcess_soft_stop_not_allowed_if_command_from_main_still_in_queue(
    four_board_file_writer_process,
):
    file_writer_process = four_board_file_writer_process["fw_process"]
    from_main_queue = four_board_file_writer_process["from_main_queue"]

    # The first communication will be processed, but if there is a second one in the queue then the soft stop should be disabled
    this_command = copy.deepcopy(GENERIC_BETA_1_START_RECORDING_COMMAND)
    this_command["active_well_indices"] = [1]
    from_main_queue.put_nowait(this_command)
    from_main_queue.put_nowait(copy.deepcopy(this_command))
    confirm_queue_is_eventually_of_size(from_main_queue, 2)
    file_writer_process.soft_stop()
    invoke_process_run_and_check_errors(file_writer_process)
    confirm_queue_is_eventually_of_size(from_main_queue, 1)
    assert file_writer_process.is_stopped() is False

    # Tanner (3/8/21): Prevent BrokenPipeErrors
    drain_queue(from_main_queue)
Example #20
def test_FileWriterProcess_soft_stop_not_allowed_if_incoming_data_still_in_queue_for_board_0(
    four_board_file_writer_process,
):
    file_writer_process = four_board_file_writer_process["fw_process"]
    board_queues = four_board_file_writer_process["board_queues"]

    # The first communication will be processed, but if there is a second one in the queue then the soft stop should be disabled
    board_queues[0][0].put_nowait(SIMPLE_BETA_1_CONSTRUCT_DATA_FROM_WELL_0)
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        SIMPLE_BETA_1_CONSTRUCT_DATA_FROM_WELL_0,
        board_queues[0][0],
    )

    confirm_queue_is_eventually_of_size(board_queues[0][0], 2)

    file_writer_process.soft_stop()
    invoke_process_run_and_check_errors(file_writer_process)
    assert file_writer_process.is_stopped() is False

    # Tanner (3/8/21): Prevent BrokenPipeErrors
    drain_queue(board_queues[0][0])
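Both soft-stop tests rely on the same guard inside the worker's iteration loop: a stop request is honored only once every inbound queue has been emptied. A minimal sketch of that pattern (names are illustrative, not the real stdlib-utils implementation):

class SoftStoppableWorkerSketch:
    """Illustrative only: defer a soft stop while any inbound queue still has items."""

    def __init__(self, inbound_queues):
        self._inbound_queues = inbound_queues
        self._soft_stop_requested = False

    def soft_stop(self):
        self._soft_stop_requested = True

    def _should_stop(self):
        # keep running while there are unprocessed commands or data packets
        return self._soft_stop_requested and all(q.empty() for q in self._inbound_queues)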
Example #21
def test_OkCommunicationProcess_managed_acquisition__does_not_log_percent_use_metrics_in_first_logging_cycle(
        four_board_comm_process, mocker):
    mocker.patch.object(OkCommunicationProcess,
                        "_is_ready_to_read_from_fifo",
                        return_value=True)

    ok_process = four_board_comm_process["ok_process"]
    board_queues = four_board_comm_process["board_queues"]
    ok_process._time_of_last_fifo_read[0] = datetime.datetime(  # pylint: disable=protected-access
        2020, 7, 3, 9, 25, 0, 0)
    ok_process._timepoint_of_last_fifo_read[0] = 10  # pylint: disable=protected-access
    ok_process._minimum_iteration_duration_seconds = 0  # pylint: disable=protected-access

    fifo = TestingQueue()
    for _ in range(INSTRUMENT_COMM_PERFOMANCE_LOGGING_NUM_CYCLES):
        fifo.put_nowait(produce_data(2, 0))
    confirm_queue_is_eventually_of_size(
        fifo, INSTRUMENT_COMM_PERFOMANCE_LOGGING_NUM_CYCLES)
    queues = {"pipe_outs": {PIPE_OUT_FIFO: fifo}}
    simulator = FrontPanelSimulator(queues)
    simulator.initialize_board()
    ok_process.set_board_connection(0, simulator)
    board_queues[0][0].put_nowait(
        get_mutable_copy_of_START_MANAGED_ACQUISITION_COMMUNICATION())
    confirm_queue_is_eventually_of_size(board_queues[0][0], 1)

    invoke_process_run_and_check_errors(
        ok_process,
        num_iterations=INSTRUMENT_COMM_PERFOMANCE_LOGGING_NUM_CYCLES,
        perform_setup_before_loop=True,
    )

    assert_queue_is_eventually_not_empty(board_queues[0][1])
    queue_items = drain_queue(board_queues[0][1])
    actual = queue_items[-1]
    assert "message" in actual
    assert "percent_use_metrics" not in actual["message"]

    # Tanner (5/29/20): Closing a queue while it is not empty (especially when very full) causes BrokenPipeErrors, so flushing it before the test ends prevents this
    drain_queue(board_queues[0][2])
Example #22
def test_DataAnalyzerProcess_beta_2_performance__first_second_of_data_with_analysis(
    runnable_four_board_analyzer_process,
):
    # Fill data analysis buffer with 10 seconds of data to start metric analysis,
    # Then record duration of sending 1 additional second of data
    #
    # initial pulse3D import:                             0.334087008
    # pulse3D 0.23.3:                                     0.337370183

    p, board_queues, comm_from_main_queue, comm_to_main_queue, _ = runnable_four_board_analyzer_process
    p._beta_2_mode = True
    p.change_magnetometer_config(
        {
            "magnetometer_config": DEFAULT_MAGNETOMETER_CONFIG,
            "sampling_period": DEFAULT_SAMPLING_PERIOD,
        }
    )

    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        dict(START_MANAGED_ACQUISITION_COMMUNICATION),
        comm_from_main_queue,
    )
    invoke_process_run_and_check_errors(p, perform_setup_before_loop=True)

    # load data
    num_seconds = MIN_NUM_SECONDS_NEEDED_FOR_ANALYSIS + 1
    fill_da_input_data_queue(board_queues[0][0], num_seconds)
    invoke_process_run_and_check_errors(p, num_iterations=num_seconds - 1)

    # send additional data and time analysis
    start = time.perf_counter_ns()
    invoke_process_run_and_check_errors(p)
    dur_seconds = (time.perf_counter_ns() - start) / 10**9

    # prevent BrokenPipeErrors
    drain_queue(board_queues[0][1])
    drain_queue(comm_to_main_queue)

    # print(f"Duration (seconds): {dur_seconds}")  # Eli (4/8/21): this is commented code that is deliberately kept in the codebase since it is often toggled on/off during optimization
    assert dur_seconds < 2
Example #23
def test_McCommunicationProcess__waits_until_instrument_is_done_rebooting_to_send_commands(
        four_board_mc_comm_process_no_handshake, mantarray_mc_simulator,
        mocker):
    mc_process = four_board_mc_comm_process_no_handshake["mc_process"]
    board_queues = four_board_mc_comm_process_no_handshake["board_queues"]
    simulator = mantarray_mc_simulator["simulator"]
    input_queue = board_queues[0][0]
    output_queue = board_queues[0][1]

    mocker.patch.object(
        mc_simulator,
        "_get_secs_since_reboot_command",
        autospec=True,
        side_effect=[AVERAGE_MC_REBOOT_DURATION_SECONDS],
    )

    set_connection_and_register_simulator(
        four_board_mc_comm_process_no_handshake, mantarray_mc_simulator)
    reboot_command = {
        "communication_type": "to_instrument",
        "command": "reboot",
    }
    test_command = {
        "communication_type": "metadata_comm",
        "command": "get_metadata",
    }
    handle_putting_multiple_objects_into_empty_queue(
        [copy.deepcopy(reboot_command),
         copy.deepcopy(test_command)], input_queue)
    # run mc_process to send reboot command and simulator to start reboot
    invoke_process_run_and_check_errors(mc_process)
    invoke_process_run_and_check_errors(simulator)
    # run mc_process once and confirm the command is still in queue
    invoke_process_run_and_check_errors(mc_process)
    confirm_queue_is_eventually_of_size(input_queue, 1)
    # run simulator to finish reboot
    invoke_process_run_and_check_errors(simulator)
    # run mc_process twice to confirm reboot completion and then to send command to simulator
    invoke_process_run_and_check_errors(mc_process, num_iterations=2)
    # run simulator once to process the command
    invoke_process_run_and_check_errors(simulator)
    # run mc_process to process response from instrument and send message back to main
    invoke_process_run_and_check_errors(mc_process)
    # confirm message was sent back to main
    to_main_items = drain_queue(output_queue)
    assert to_main_items[-1]["command"] == "get_metadata"
Example #24
def test_McCommunicationProcess_teardown_after_loop__flushes_and_logs_remaining_serial_data___if_error_occurred_in_mc_comm(
    patch_print,
    four_board_mc_comm_process_no_handshake,
    mantarray_mc_simulator_no_beacon,
    mocker,
):
    mc_process = four_board_mc_comm_process_no_handshake["mc_process"]
    output_queue = four_board_mc_comm_process_no_handshake["board_queues"][0][1]
    simulator = mantarray_mc_simulator_no_beacon["simulator"]
    testing_queue = mantarray_mc_simulator_no_beacon["testing_queue"]
    set_connection_and_register_simulator(
        four_board_mc_comm_process_no_handshake, mantarray_mc_simulator_no_beacon
    )

    # add one data packet with bad magic word to raise error and additional bytes to flush from simulator
    test_read_bytes = [
        bytes(SERIAL_COMM_MIN_FULL_PACKET_LENGTH_BYTES),  # bad packet
        bytes(SERIAL_COMM_MAX_PACKET_LENGTH_BYTES),  # start of additional bytes
        bytes(SERIAL_COMM_MAX_PACKET_LENGTH_BYTES),
        bytes(SERIAL_COMM_MAX_PACKET_LENGTH_BYTES // 2),  # arbitrary final length
    ]
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        {
            "command": "add_read_bytes",
            "read_bytes": test_read_bytes,
        },
        testing_queue,
    )
    invoke_process_run_and_check_errors(simulator)
    # read beacon then flush remaining serial data
    with pytest.raises(SerialCommIncorrectMagicWordFromMantarrayError):
        invoke_process_run_and_check_errors(
            mc_process,
            perform_teardown_after_loop=True,
        )
    assert simulator.in_waiting == 0
    # check that log message contains remaining data
    teardown_messages = drain_queue(output_queue)
    actual = teardown_messages[-1]
    assert "message" in actual, f"Correct message not found. Full message dict: {actual}"
    expected_bytes = bytes(
        sum([len(packet) for packet in test_read_bytes]) - len(SERIAL_COMM_MAGIC_WORD_BYTES)
    )
    assert str(expected_bytes) in actual["message"]
Example #25
def test_McCommunicationProcess_setup_before_loop__does_not_send_message_to_main_when_setup_comm_is_suppressed(
    mocker,
):
    # mock this so the process priority isn't changed during unit tests
    mocker.patch.object(mc_comm, "set_this_process_high_priority", autospec=True)

    board_queues, error_queue = generate_board_and_error_queues(num_boards=4)
    mc_process = McCommunicationProcess(board_queues, error_queue, suppress_setup_communication_to_main=True)
    mocked_create_connections = mocker.patch.object(
        mc_process, "create_connections_to_all_available_boards", autospec=True
    )

    invoke_process_run_and_check_errors(mc_process, perform_setup_before_loop=True)
    mocked_create_connections.assert_called_once()

    # Other parts of the process after setup may or may not send messages to main, so drain queue and make sure none of the items (if present) have a setup message
    to_main_queue_items = drain_queue(board_queues[0][1])
    for item in to_main_queue_items:
        if "message" in item:
            assert "Microcontroller Communication Process initiated" not in item["message"]
Example #26
def test_OkCommunicationProcess_teardown_after_loop__logs_message_indicating_acquisition_is_still_running(
    four_board_comm_process,
    mocker,
):
    ok_process = four_board_comm_process["ok_process"]
    board_queues = four_board_comm_process["board_queues"]
    comm_to_main_queue = board_queues[0][1]
    input_queue = board_queues[0][0]

    simulator = RunningFIFOSimulator()
    ok_process.set_board_connection(0, simulator)

    input_queue.put_nowait({
        "communication_type": "debug_console",
        "command": "initialize_board",
        "bit_file_name": None,
    })
    input_queue.put_nowait(
        get_mutable_copy_of_START_MANAGED_ACQUISITION_COMMUNICATION())
    confirm_queue_is_eventually_of_size(input_queue, 2)
    invoke_process_run_and_check_errors(ok_process,
                                        num_iterations=2,
                                        perform_teardown_after_loop=True)

    confirm_queue_is_eventually_of_size(comm_to_main_queue, 4)
    # get the last item in the queue
    queue_items = drain_queue(
        comm_to_main_queue,
        timeout_seconds=QUEUE_CHECK_TIMEOUT_SECONDS,
    )
    actual_last_queue_item = queue_items[-1]
    assert "message" in actual_last_queue_item
    assert (
        actual_last_queue_item["message"] ==
        "Board acquisition still running. Stopping acquisition to complete teardown"
    )
Example #27
def test_communication_with_live_board(
        four_board_mc_comm_process_hardware_test_mode):
    # pylint: disable=too-many-locals,too-many-branches  # Tanner (6/4/21): a lot of local variables and branches needed for this test
    mc_process, board_queues, error_queue = four_board_mc_comm_process_hardware_test_mode.values()
    input_queue = board_queues[0][0]
    output_queue = board_queues[0][1]
    data_queue = board_queues[0][2]

    mc_process._main_firmware_update_bytes = bytes(
        int(SERIAL_COMM_MAX_PACKET_BODY_LENGTH_BYTES * 1.5))
    mc_process._channel_firmware_update_bytes = bytes(
        int(SERIAL_COMM_MAX_PACKET_BODY_LENGTH_BYTES * 1.5))

    print("\n*** BEGIN TEST ***")  # allow-print

    mc_process.start()

    for command, response_key in COMMAND_RESPONSE_SEQUENCE:
        if not isinstance(command, str):
            for idx, sub_command in enumerate(command):
                command_dict = COMMANDS_FROM_MAIN[sub_command]
                print(  # allow-print
                    f"Sending command: {sub_command}, expecting response: {response_key[idx]}"
                )
                input_queue.put_nowait(command_dict)
            expected_response = RESPONSES[response_key[-1]]
        elif command not in ("get_metadata", "change_magnetometer_config_1"):
            # get_metadata command and initial magnetometer config are automatically sent by McComm
            command_dict = COMMANDS_FROM_MAIN[command]
            print(
                f"Sending command: {command}, expecting response: {response_key}"
            )  # allow-print
            input_queue.put_nowait(command_dict)
            expected_response = RESPONSES[response_key]
        else:
            expected_response = RESPONSES[response_key]

        response_found = False
        error = None
        try:
            while not response_found:
                # check for error
                if not error_queue.empty():
                    try:
                        error = error_queue.get(
                            timeout=QUEUE_CHECK_TIMEOUT_SECONDS)
                        assert False, get_formatted_stack_trace(error[0])
                    except queue.Empty:
                        assert False, "Error queue reported not empty but no error found in queue"
                # check for message to main
                try:
                    msg_to_main = output_queue.get(
                        timeout=QUEUE_CHECK_TIMEOUT_SECONDS)
                except queue.Empty:
                    continue

                # if message is found then handle it
                comm_type = msg_to_main["communication_type"]
                if comm_type == "log":
                    # if message is from a status beacon or handshake, just print it
                    print("### Log msg:",
                          msg_to_main["message"])  # allow-print
                elif comm_type == "board_connection_status_change":
                    # if message is some other form of expected message, just print it
                    print("###", msg_to_main)  # allow-print
                elif comm_type == "firmware_update":
                    print("&&&", msg_to_main)  # allow-print
                    response_found = (
                        msg_to_main["command"] == "update_completed"
                        and msg_to_main["firmware_type"] == "main")
                elif comm_type == expected_response["communication_type"]:
                    if msg_to_main.get("command", "") == "status_update":
                        print("###", msg_to_main)  # allow-print
                        continue
                    if "timestamp" in msg_to_main:
                        del msg_to_main["timestamp"]
                    # if message is the response, make sure it is as expected
                    print("$$$", msg_to_main)  # allow-print
                    if msg_to_main.get("command", "") == "get_metadata":
                        actual_metadata = msg_to_main.pop("metadata")
                        expected_metadata = expected_response.pop("metadata")
                        assert (
                            actual_metadata == expected_metadata
                        ), f"Incorrect metadata\nActual: {actual_metadata}\nExpected: {expected_metadata}"
                    assert (
                        msg_to_main == expected_response
                    ), f"{response_key}\nActual: {msg_to_main}\nExpected: {expected_response}"
                    if response_key == "start_md_1":
                        # sleep after data stream starts so data can be parsed and sent to file writer
                        print("Sleeping so data can be produced and parsed..."
                              )  # allow-print
                        time.sleep(2)
                        print("End sleep...")  # allow-print
                    elif response_key == "start_stim_2_1":
                        print("Sleeping to let stim complete")  # allow-print
                        time.sleep(20)
                    response_found = True
                elif msg_to_main.get("command", None) == "set_time" or comm_type == "barcode_comm":
                    # this branch not needed for real board
                    print("@@@", msg_to_main)  # allow-print
                    continue
                else:
                    # o/w stop test
                    print("!!!", msg_to_main)  # allow-print
                    print("!!!", expected_response)  # allow-print
                    assert False, "unexpected msg sent to main"
        except AssertionError as e:
            error = e
            break

    # stop and join McComm
    if error:
        mc_process.hard_stop()
    else:
        mc_process.soft_stop()
    data_sent_to_fw = drain_queue(data_queue)
    mc_process.join()

    if error:
        raise error

    # do one last check of error_queue
    try:
        error = error_queue.get(timeout=QUEUE_CHECK_TIMEOUT_SECONDS)
        assert False, get_formatted_stack_trace(error[0])
    except queue.Empty:
        print(
            "No errors after Instrument Communication Process stopped and joined"
        )  # allow-print

    if len(data_sent_to_fw) == 0:
        assert False, "No data packets sent to File Writer"

    # test keys of dict going to file writer. tests on the actual data will be done in the full integration test
    test_num_wells = 24
    expected_fw_item = {"time_indices": None, "data_type": "magnetometer"}
    for well_idx in range(test_num_wells):
        module_config_values = list(
            DEFAULT_MAGNETOMETER_CONFIG[SERIAL_COMM_WELL_IDX_TO_MODULE_ID[well_idx]].values()
        )
        if not any(module_config_values):
            continue

        num_channels_for_well = 0
        for sensor_start_idx in range(0, SERIAL_COMM_NUM_DATA_CHANNELS, SERIAL_COMM_NUM_CHANNELS_PER_SENSOR):
            num_channels_for_sensor = sum(
                module_config_values[sensor_start_idx : sensor_start_idx + SERIAL_COMM_NUM_CHANNELS_PER_SENSOR]
            )
            num_channels_for_well += int(num_channels_for_sensor > 0)

        channel_dict = {"time_offsets": None}
        for channel_id in range(SERIAL_COMM_NUM_DATA_CHANNELS):
            if not module_config_values[channel_id]:
                continue
            channel_dict[channel_id] = None
        expected_fw_item[well_idx] = channel_dict
    expected_fw_item["is_first_packet_of_stream"] = None

    for actual_item in data_sent_to_fw:
        if actual_item["data_type"] == "stimulation":
            print("### Ignoring stim packet:", actual_item)  # allow-print
            continue
        assert actual_item.keys() == expected_fw_item.keys()
        for key, expected_item in expected_fw_item.items():
            if key in ("is_first_packet_of_stream", "time_indices",
                       "data_type"):
                continue
            item = actual_item[key]
            assert item.keys() == expected_item.keys()  # pylint: disable=no-member

    print("*** TEST COMPLETE ***")  # allow-print
Example #28
def test_DataAnalyzerProcess__logs_performance_metrics_after_creating_beta_2_data(
        four_board_analyzer_process_beta_2_mode, mocker):
    da_process = four_board_analyzer_process_beta_2_mode["da_process"]
    to_main_queue = four_board_analyzer_process_beta_2_mode["to_main_queue"]
    from_main_queue = four_board_analyzer_process_beta_2_mode["from_main_queue"]
    board_queues = four_board_analyzer_process_beta_2_mode["board_queues"]

    # perform setup so performance logging values are initialized
    invoke_process_run_and_check_errors(da_process,
                                        perform_setup_before_loop=True)

    # set this to a lower value to speed up the test
    da_process._minimum_iteration_duration_seconds /= 10

    # mock functions to speed up test
    mocker.patch.object(data_analyzer,
                        "get_force_signal",
                        autospec=True,
                        return_value=np.zeros((2, 2)))
    mocker.patch.object(data_analyzer,
                        "peak_detector",
                        autospec=True,
                        side_effect=PeakDetectionError())

    # set magnetometer configuration
    expected_sampling_period_us = 10000
    num_data_points_per_second = MICRO_TO_BASE_CONVERSION // expected_sampling_period_us
    set_magnetometer_config(
        four_board_analyzer_process_beta_2_mode,
        {
            "magnetometer_config": GENERIC_BOARD_MAGNETOMETER_CONFIGURATION,
            "sampling_period": expected_sampling_period_us,
        },
    )
    # start managed acquisition
    start_command = get_mutable_copy_of_START_MANAGED_ACQUISITION_COMMUNICATION()
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        start_command, from_main_queue)
    invoke_process_run_and_check_errors(da_process)
    # remove command receipt
    to_main_queue.get(timeout=QUEUE_CHECK_TIMEOUT_SECONDS)

    # create expected durations for metric creation
    expected_num_data_packets = MIN_NUM_SECONDS_NEEDED_FOR_ANALYSIS
    expected_data_creation_durs = [
        random.uniform(30, 70) for _ in range(expected_num_data_packets)
    ]
    mocker.patch.object(
        data_analyzer,
        "_get_secs_since_data_creation_start",
        autospec=True,
        side_effect=expected_data_creation_durs,
    )
    expected_data_analysis_durs = [random.uniform(20, 80) for _ in range(24)]
    mocker.patch.object(
        data_analyzer,
        "_get_secs_since_data_analysis_start",
        autospec=True,
        side_effect=expected_data_analysis_durs,
    )

    # create test data packets
    for packet_num in range(expected_num_data_packets):
        test_packet = copy.deepcopy(
            SIMPLE_BETA_2_CONSTRUCT_DATA_FROM_ALL_WELLS)
        test_packet["time_indices"] = (np.arange(
            num_data_points_per_second * packet_num,
            num_data_points_per_second * (packet_num + 1),
            dtype=np.int64,
        ) * expected_sampling_period_us)
        put_object_into_queue_and_raise_error_if_eventually_still_empty(
            test_packet, board_queues[0][0])
        invoke_process_run_and_check_errors(da_process)
    confirm_queue_is_eventually_of_size(
        to_main_queue, expected_num_data_packets * 2
    )  # Tanner (1/4/21): a log message is also put into queue after each waveform data dump

    actual = drain_queue(to_main_queue)[-1]["message"]
    assert actual["communication_type"] == "performance_metrics"
    assert actual["data_creation_duration"] == expected_data_creation_durs[-1]
    assert actual["data_creation_duration_metrics"] == {
        "max":
        max(expected_data_creation_durs),
        "min":
        min(expected_data_creation_durs),
        "stdev":
        round(stdev(expected_data_creation_durs), 6),
        "mean":
        round(
            sum(expected_data_creation_durs) /
            len(expected_data_creation_durs), 6),
    }
    assert actual["data_analysis_duration_metrics"] == {
        "max":
        max(expected_data_analysis_durs),
        "min":
        min(expected_data_analysis_durs),
        "stdev":
        round(stdev(expected_data_analysis_durs), 6),
        "mean":
        round(
            sum(expected_data_analysis_durs) /
            len(expected_data_analysis_durs), 6),
    }
    # values created in parent class
    assert "start_timepoint_of_measurements" not in actual
    assert "idle_iteration_time_ns" not in actual
    assert "longest_iterations" in actual
    assert "percent_use" in actual
    assert "percent_use_metrics" in actual

    # prevent BrokenPipeErrors
    drain_queue(board_queues[0][1])
Example #29
def test_OkCommunicationProcess_managed_acquisition__logs_performance_metrics_after_appropriate_number_of_read_cycles(
        four_board_comm_process, mocker):
    expected_idle_time = 1
    expected_start_timepoint = 7
    expected_stop_timepoint = 11
    expected_latest_percent_use = 100 * (
        1 - expected_idle_time /
        (expected_stop_timepoint - expected_start_timepoint))
    expected_percent_use_values = [40.1, 67.8, expected_latest_percent_use]
    expected_longest_iterations = list(
        range(INSTRUMENT_COMM_PERFOMANCE_LOGGING_NUM_CYCLES - 1))

    test_data_parse_dur_values = [
        0 for _ in range(INSTRUMENT_COMM_PERFOMANCE_LOGGING_NUM_CYCLES * 2)
    ]
    test_read_dur_values = [
        0 for _ in range(INSTRUMENT_COMM_PERFOMANCE_LOGGING_NUM_CYCLES * 2)
    ]
    for i in range(1, INSTRUMENT_COMM_PERFOMANCE_LOGGING_NUM_CYCLES * 2, 2):
        test_data_parse_dur_values[i] = i
        test_read_dur_values[i] = i // 2 + 1
    test_acquisition_values = [
        20 for _ in range(INSTRUMENT_COMM_PERFOMANCE_LOGGING_NUM_CYCLES)
    ]
    for i in range(1, INSTRUMENT_COMM_PERFOMANCE_LOGGING_NUM_CYCLES):
        test_acquisition_values[i] = test_acquisition_values[i - 1] + 10 * (i + 1)

    perf_counter_vals = list()
    for i in range(0, INSTRUMENT_COMM_PERFOMANCE_LOGGING_NUM_CYCLES * 2, 2):
        perf_counter_vals.append(test_read_dur_values[i])
        perf_counter_vals.append(test_read_dur_values[i + 1])
        perf_counter_vals.append(test_data_parse_dur_values[i])
        perf_counter_vals.append(test_data_parse_dur_values[i + 1])
        perf_counter_vals.append(test_acquisition_values[i // 2])
    mocker.patch.object(time, "perf_counter", side_effect=perf_counter_vals)
    mocker.patch.object(time,
                        "perf_counter_ns",
                        return_value=expected_stop_timepoint)
    mocker.patch.object(OkCommunicationProcess,
                        "_is_ready_to_read_from_fifo",
                        return_value=True)
    mocker.patch.object(
        parallelism_framework,
        "calculate_iteration_time_ns",
        autospec=True,
        side_effect=expected_longest_iterations,
    )

    ok_process = four_board_comm_process["ok_process"]
    board_queues = four_board_comm_process["board_queues"]
    ok_process._time_of_last_fifo_read[0] = datetime.datetime(  # pylint: disable=protected-access
        2020, 5, 28, 12, 58, 0, 0)
    ok_process._timepoint_of_last_fifo_read[0] = 10  # pylint: disable=protected-access
    ok_process._idle_iteration_time_ns = expected_idle_time  # pylint: disable=protected-access
    ok_process._minimum_iteration_duration_seconds = 0  # pylint: disable=protected-access
    ok_process._start_timepoint_of_last_performance_measurement = (  # pylint: disable=protected-access
        expected_start_timepoint)
    ok_process._percent_use_values = expected_percent_use_values[:-1]  # pylint: disable=protected-access

    test_fifo_reads = [
        produce_data(i + 2, 0)
        for i in range(INSTRUMENT_COMM_PERFOMANCE_LOGGING_NUM_CYCLES)
    ]
    fifo = TestingQueue()
    for read in test_fifo_reads:
        fifo.put_nowait(read)
    confirm_queue_is_eventually_of_size(
        fifo, INSTRUMENT_COMM_PERFOMANCE_LOGGING_NUM_CYCLES)
    queues = {"pipe_outs": {PIPE_OUT_FIFO: fifo}}
    simulator = FrontPanelSimulator(queues)
    simulator.initialize_board()
    ok_process.set_board_connection(0, simulator)
    board_queues[0][0].put_nowait(
        get_mutable_copy_of_START_MANAGED_ACQUISITION_COMMUNICATION())
    confirm_queue_is_eventually_of_size(board_queues[0][0], 1)

    invoke_process_run_and_check_errors(
        ok_process,
        num_iterations=INSTRUMENT_COMM_PERFOMANCE_LOGGING_NUM_CYCLES)

    assert_queue_is_eventually_not_empty(board_queues[0][1])
    queue_items = drain_queue(board_queues[0][1])
    actual = queue_items[-1]
    assert "message" in actual
    actual = actual["message"]

    expected_num_bytes = [len(read) for read in test_fifo_reads]
    expected_parsing_dur_values = [
        i * 2 + 1 for i in range(INSTRUMENT_COMM_PERFOMANCE_LOGGING_NUM_CYCLES)
    ]
    expected_read_dur_values = [
        i + 1 for i in range(INSTRUMENT_COMM_PERFOMANCE_LOGGING_NUM_CYCLES)
    ]
    expected_acquisition_values = [
        10 * (i + 1)
        for i in range(INSTRUMENT_COMM_PERFOMANCE_LOGGING_NUM_CYCLES)
    ]
    assert actual["communication_type"] == "performance_metrics"
    assert "idle_iteration_time_ns" not in actual
    assert "start_timepoint_of_measurements" not in actual
    assert actual["fifo_read_num_bytes"] == {
        "max": max(expected_num_bytes),
        "min": min(expected_num_bytes),
        "stdev": round(stdev(expected_num_bytes), 6),
        "mean": round(sum(expected_num_bytes) / len(expected_num_bytes), 6),
    }
    assert actual["fifo_read_duration"] == {
        "max":
        max(expected_read_dur_values),
        "min":
        min(expected_read_dur_values),
        "stdev":
        round(stdev(expected_read_dur_values), 6),
        "mean":
        round(
            sum(expected_read_dur_values) / len(expected_read_dur_values), 6),
    }
    assert actual["duration_between_acquisition"] == {
        "max":
        max(expected_acquisition_values),
        "min":
        min(expected_acquisition_values),
        "stdev":
        round(stdev(expected_acquisition_values), 6),
        "mean":
        round(
            sum(expected_acquisition_values) /
            len(expected_acquisition_values), 6),
    }
    assert actual["data_parsing_duration"] == {
        "max":
        max(expected_parsing_dur_values),
        "min":
        min(expected_parsing_dur_values),
        "stdev":
        round(stdev(expected_parsing_dur_values), 6),
        "mean":
        round(
            sum(expected_parsing_dur_values) /
            len(expected_parsing_dur_values), 6),
    }
    assert actual["percent_use"] == expected_latest_percent_use
    assert actual["percent_use_metrics"] == {
        "max":
        max(expected_percent_use_values),
        "min":
        min(expected_percent_use_values),
        "stdev":
        round(stdev(expected_percent_use_values), 6),
        "mean":
        round(
            sum(expected_percent_use_values) /
            len(expected_percent_use_values), 6),
    }
    num_longest_iterations = ok_process.num_longest_iterations
    assert actual["longest_iterations"] == expected_longest_iterations[
        -num_longest_iterations:]

    # Tanner (5/29/20): Closing a queue while it is not empty (especially when very full) causes BrokenPipeErrors, so flushing it before the test ends prevents this
    drain_queue(board_queues[0][2])
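The metric dictionaries asserted in the last two examples all share one shape. For reference, a small helper that produces it (a sketch; the production code may well compute these inline):

from statistics import stdev

def create_metric_dict_sketch(values):
    # matches the {max, min, stdev, mean} shape asserted in the performance tests above
    return {
        "max": max(values),
        "min": min(values),
        "stdev": round(stdev(values), 6),
        "mean": round(sum(values) / len(values), 6),
    }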
Example #30
def test_DataAnalyzerProcess__does_not_process_data_packets_after_receiving_stop_managed_acquisition_command_until_receiving_first_packet_of_new_stream(
        four_board_analyzer_process_beta_2_mode, mocker):
    da_process = four_board_analyzer_process_beta_2_mode["da_process"]
    from_main_queue = four_board_analyzer_process_beta_2_mode["from_main_queue"]
    to_main_queue = four_board_analyzer_process_beta_2_mode["to_main_queue"]
    incoming_data_queue = four_board_analyzer_process_beta_2_mode["board_queues"][0][0]

    # mock this since not using real data
    mocked_process_data = mocker.patch.object(da_process,
                                              "_process_beta_2_data",
                                              autospec=True,
                                              return_value={})

    invoke_process_run_and_check_errors(da_process,
                                        perform_setup_before_loop=True)
    # set config with an arbitrary sampling period
    test_sampling_period = 10000
    set_magnetometer_config(
        four_board_analyzer_process_beta_2_mode,
        {
            "magnetometer_config": GENERIC_BOARD_MAGNETOMETER_CONFIGURATION,
            "sampling_period": test_sampling_period,
        },
    )

    # start managed_acquisition
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        dict(START_MANAGED_ACQUISITION_COMMUNICATION), from_main_queue)
    invoke_process_run_and_check_errors(da_process)
    # send first packet of first stream and make sure it is processed
    test_data_packet = copy.deepcopy(
        SIMPLE_BETA_2_CONSTRUCT_DATA_FROM_ALL_WELLS)
    test_data_packet["is_first_packet_of_stream"] = True
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        test_data_packet, incoming_data_queue)
    invoke_process_run_and_check_errors(da_process)
    assert mocked_process_data.call_count == 1
    # send another packet of first stream and make sure it is processed
    test_data_packet = copy.deepcopy(
        SIMPLE_BETA_2_CONSTRUCT_DATA_FROM_ALL_WELLS)
    test_data_packet["is_first_packet_of_stream"] = False
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        test_data_packet, incoming_data_queue)
    invoke_process_run_and_check_errors(da_process)
    assert mocked_process_data.call_count == 2

    # stop managed acquisition and make sure next data packet in the first stream is not processed
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        dict(STOP_MANAGED_ACQUISITION_COMMUNICATION), from_main_queue)
    invoke_process_run_and_check_errors(da_process)
    test_data_packet = copy.deepcopy(
        SIMPLE_BETA_2_CONSTRUCT_DATA_FROM_ALL_WELLS)
    test_data_packet["is_first_packet_of_stream"] = False
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        test_data_packet, incoming_data_queue)
    invoke_process_run_and_check_errors(da_process)
    assert mocked_process_data.call_count == 2

    # start managed acquisition again and make sure next data packet in the first stream is not processed
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        dict(START_MANAGED_ACQUISITION_COMMUNICATION), from_main_queue)
    invoke_process_run_and_check_errors(da_process)
    test_data_packet = copy.deepcopy(
        SIMPLE_BETA_2_CONSTRUCT_DATA_FROM_ALL_WELLS)
    test_data_packet["is_first_packet_of_stream"] = False
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        test_data_packet, incoming_data_queue)
    invoke_process_run_and_check_errors(da_process)
    assert mocked_process_data.call_count == 2

    # send first data packet from second stream and make sure it is processed
    test_data_packet = copy.deepcopy(
        SIMPLE_BETA_2_CONSTRUCT_DATA_FROM_ALL_WELLS)
    test_data_packet["is_first_packet_of_stream"] = True
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        test_data_packet, incoming_data_queue)
    invoke_process_run_and_check_errors(da_process)
    assert mocked_process_data.call_count == 3

    # prevent BrokenPipeErrors
    drain_queue(to_main_queue)
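The stream gate this test exercises can be summarized in a few lines. An illustrative sketch of the gating logic (not the actual DataAnalyzerProcess code):

class StreamGateSketch:
    """Illustrative only: ignore packets between a stop command and the first packet of the next stream."""

    def __init__(self):
        self._waiting_for_new_stream = False

    def handle_stop_managed_acquisition(self):
        self._waiting_for_new_stream = True

    def should_process(self, packet):
        if self._waiting_for_new_stream:
            if not packet["is_first_packet_of_stream"]:
                return False  # stale packet from the old stream
            self._waiting_for_new_stream = False
        return True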