def test_build_file_writer_objects__logs_warning__when_first_data_frame_period_of_read_is_not_expected_value_and_logging_level_is_debug(
    test_data_frame_period, test_description, mocker
):
    mocked_put = mocker.patch.object(ok_comm, "put_log_message_into_queue", autospec=True)

    expected_queue = TestingQueue()
    first_data_frame_size = DATA_FRAME_SIZE_WORDS * 4  # number of bytes in a word
    test_bytearray = produce_data(1, 0)[:first_data_frame_size]
    test_bytearray.extend(produce_data(1, test_data_frame_period))
    expected_logging_threshold = logging.DEBUG

    build_file_writer_objects(
        test_bytearray,
        "six_channels_32_bit__single_sample_index",
        expected_queue,
        expected_logging_threshold,
    )

    expected_message = f"Detected period between first two data frames of FIFO read: {test_data_frame_period * TIMESTEP_CONVERSION_FACTOR} does not matched expected value: {DATA_FRAME_PERIOD}. Actual time indices: 0x0, {hex(test_data_frame_period * TIMESTEP_CONVERSION_FACTOR)}"
    mocked_put.assert_any_call(
        logging.DEBUG,
        expected_message,
        expected_queue,
        expected_logging_threshold,
    )
def test_build_file_writer_objects_performance():
    # 10 iterations with 625 Hz data rate
    #
    # 1. cython parse_sensor_bytes: 266523479.8
    # 2. parse_sensor_bytes (4):    253645684.3
    # 3. parse_sensor_bytes (5):    246193920.3

    num_cycles = math.ceil(CENTIMILLISECONDS_PER_SECOND / ROUND_ROBIN_PERIOD)
    test_bytearray = produce_data(num_cycles, 0)
    q = Queue()

    start = time.perf_counter_ns()
    num_iterations = 10
    for _ in range(num_iterations):
        build_file_writer_objects(
            test_bytearray,
            "six_channels_32_bit__single_sample_index",
            q,
            logging.DEBUG,
        )
    dur = time.perf_counter_ns() - start
    ns_per_iter = dur / num_iterations

    # print(f"ns per iterations: {ns_per_iter}")  # pylint:disable=wrong-spelling-in-comment # Eli (4/8/21): this is commented code that is deliberately kept in the codebase since it is often toggled on/off during optimization

    assert (
        ns_per_iter < 450000000
    )  # Eli (10/20/20): bumped up from 300000000 to 450000000 because it was running a bit slow on windows in Github CI
def test_build_file_writer_objects__raises_error_if_format_name_not_recognized(
    patch_check_header, mocker
):
    # Tanner (5/21/20) When the error is raised, the queue is closed before it finishes writing to Pipe, so mock to avoid error in test
    mocker.patch.object(ok_comm, "put_log_message_into_queue", autospec=True)

    q = TestingQueue()
    with pytest.raises(UnrecognizedDataFrameFormatNameError, match="fakeformat"):
        build_file_writer_objects(
            bytearray([0, 0, 0, 0, 0, 0, 0, 0]),
            "fakeformat",
            q,
            logging.DEBUG,
        )
def test_build_file_writer_objects__raises_error__when_first_data_frame_period_of_read_is_not_expected_value_and_logging_level_is_info_or_higher(
    test_data_frame_period, test_logging_level, test_description, mocker
):
    # Tanner (7/10/20) When the error is raised, the queue is closed before it finishes writing to Pipe, so mock to avoid error in test
    mocker.patch.object(ok_comm, "put_log_message_into_queue", autospec=True)

    q = TestingQueue()
    first_data_frame_size = DATA_FRAME_SIZE_WORDS * 4  # number of bytes in a word
    test_bytearray = produce_data(1, 0)[:first_data_frame_size]
    test_bytearray.extend(produce_data(1, test_data_frame_period))

    expected_error_string = f"Detected period between first two data frames of FIFO read: {test_data_frame_period * TIMESTEP_CONVERSION_FACTOR} does not matched expected value: {DATA_FRAME_PERIOD}. Actual time indices: 0x0, {hex(test_data_frame_period * TIMESTEP_CONVERSION_FACTOR)}"
    with pytest.raises(InvalidDataFramePeriodError, match=expected_error_string):
        build_file_writer_objects(
            test_bytearray,
            "six_channels_32_bit__single_sample_index",
            q,
            test_logging_level,
        )
def test_build_file_writer_objects__correctly_parses_a_real_data_cycle_from_jason(
    mocker,
):
    test_bytearray = build_jasons_data_cycle()

    expected_dict = {}
    for ch_num in range(24):
        expected_dict[ch_num] = {
            "is_reference_sensor": False,
            "well_index": ch_num,
            "data": None,
        }
    for ref_num in range(6):
        expected_dict[f"ref{ref_num}"] = {
            "is_reference_sensor": True,
            "reference_for_wells": REF_INDEX_TO_24_WELL_INDEX[ref_num],
            "data": None,
        }

    for frame in range(8):
        sample_idx = int(DATA_FROM_JASON[(frame * 9) + 2], 16)
        for adc in range(6):
            data_word = bytearray(
                struct.pack("<L", int(DATA_FROM_JASON[(frame * 9) + 3 + adc], 16))
            )
            is_reference_sensor, index, sensor_value = parse_sensor_bytes(data_word)
            data = np.array(
                [[sample_idx * TIMESTEP_CONVERSION_FACTOR], [sensor_value]],
                dtype=np.int32,
            )
            key = f"ref{index}" if is_reference_sensor else index
            if expected_dict[key]["data"] is not None:
                expected_dict[key]["data"] = np.concatenate(
                    (expected_dict[key]["data"], data), axis=1
                )
            else:
                expected_dict[key]["data"] = data

    logging_queue = TestingQueue()  # Eli (3/16/20): if there isn't a reference to the queue that still exists, it gives a 'broken pipe' error
    mocker.patch.object(
        ok_comm, "put_log_message_into_queue", autospec=True
    )  # Tanner (3/3/21): For some reason this queue is still causing BrokenPipeErrors, so mocking the function that puts objects into it since that functionality is not tested here

    actual = build_file_writer_objects(
        test_bytearray,
        "six_channels_32_bit__single_sample_index",
        logging_queue,
        logging.DEBUG,
    )

    for key in expected_dict:
        assert actual[key]["is_reference_sensor"] is expected_dict[key]["is_reference_sensor"]
        if isinstance(key, str):
            assert actual[key]["reference_for_wells"] == expected_dict[key]["reference_for_wells"]
        else:
            assert actual[key]["well_index"] == expected_dict[key]["well_index"]
        np.testing.assert_equal(actual[key]["data"], expected_dict[key]["data"])
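# Illustrative sketch (an assumption inferred from the assertions in the surrounding tests,
# not part of the library API): build_file_writer_objects appears to return a dict keyed by
# well index (0-23) for construct sensors and by "ref0".."ref5" for reference sensors, where
# each entry's "data" is a 2 x N int32 array with time indices in row 0 and sensor values in
# row 1, e.g.:
#
#     {
#         0: {"is_reference_sensor": False, "well_index": 0, "data": np.array([[t0, t1], [v0, v1]], dtype=np.int32)},
#         "ref0": {"is_reference_sensor": True, "reference_for_wells": REF_INDEX_TO_24_WELL_INDEX[0], "data": ...},
#         ...
#     }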
def test_build_file_writer_objects__returns_correct_values__with_six_channel_format__three_cycles():
    expected = dict()
    for i in range(6):
        expected[f"ref{i}"] = {
            "is_reference_sensor": True,
            "reference_for_wells": REF_INDEX_TO_24_WELL_INDEX[i],
            "data": None,
        }
    for i in range(24):
        expected[i] = {
            "is_reference_sensor": False,
            "well_index": i,
            "data": None,
        }

    # build test and expected data
    test_bytearray = bytearray(0)
    for cycle in range(3):
        for frame in range(8):
            # add header
            test_bytearray.extend(build_header_magic_number_bytes(HEADER_MAGIC_NUMBER))
            sample_index = (
                cycle * ROUND_ROBIN_PERIOD // TIMESTEP_CONVERSION_FACTOR
                + frame * DATA_FRAME_PERIOD
            )
            # add sample index
            test_bytearray.extend(struct.pack("<L", sample_index))
            # add channel data
            for adc_num in range(6):
                # add metadata byte
                adc_ch_num = frame
                metadata_byte = (adc_num << 4) + adc_ch_num
                test_bytearray.extend([metadata_byte])
                # add value equal to position in data frame format
                test_data_byte = (48 * cycle) + (6 * frame) + adc_num
                test_bytearray.extend([test_data_byte, 0, 0])

                # update expected dictionary
                data = np.array(
                    [
                        [sample_index * TIMESTEP_CONVERSION_FACTOR],
                        [test_data_byte - RAW_TO_SIGNED_CONVERSION_VALUE],
                    ],
                    dtype=np.int32,
                )
                # determine if reference or construct
                if adc_ch_num % 2 == 1:
                    key = f"ref{adc_num}"
                else:
                    key = ADC_CH_TO_24_WELL_INDEX[adc_num][adc_ch_num]
                # add data appropriately
                if expected[key]["data"] is not None:
                    expected[key]["data"] = np.concatenate(
                        (expected[key]["data"], data), axis=1
                    )
                else:
                    expected[key]["data"] = data

    actual_queue = TestingQueue()
    actual = build_file_writer_objects(
        test_bytearray,
        "six_channels_32_bit__single_sample_index",
        actual_queue,
        logging.DEBUG,
    )

    for key in expected:
        assert actual[key]["is_reference_sensor"] is expected[key]["is_reference_sensor"]
        if isinstance(key, str):
            assert actual[key]["reference_for_wells"] == expected[key]["reference_for_wells"]
        else:
            assert actual[key]["well_index"] == expected[key]["well_index"]
        np.testing.assert_equal(actual[key]["data"], expected[key]["data"])

    # drain the queue to avoid broken pipe errors
    drain_queue(actual_queue)
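# The helper below is a minimal, illustrative sketch of how a single
# "six_channels_32_bit__single_sample_index" data frame is assembled, inferred from the
# byte-level construction in the test above. The function name and the (adc_ch_num, value)
# argument shape are assumptions made here for illustration; this helper does not exist in
# the production code and is not used by the tests.
def _sketch_single_data_frame(sample_index, adc_words):
    """Build one example data frame from six (adc_ch_num, value) pairs for ADCs 0-5."""
    frame = bytearray()
    # header magic number bytes
    frame.extend(build_header_magic_number_bytes(HEADER_MAGIC_NUMBER))
    # 4-byte little-endian sample index shared by all six sensor words in the frame
    frame.extend(struct.pack("<L", sample_index))
    for adc_num, (adc_ch_num, value) in enumerate(adc_words):
        # metadata byte: upper nibble is the ADC number, lower nibble is the ADC channel
        frame.extend([(adc_num << 4) + adc_ch_num])
        # raw sensor value as the low three bytes of a little-endian word (assumes value < 2**24)
        frame.extend(struct.pack("<L", value)[:3])
    return frame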