def test_collection_index_validates_when_data_changes(self):
    """The collection index clamps to valid bounds after the underlying data shrinks."""
    document_model = DocumentModel.DocumentModel()
    document_controller = DocumentController.DocumentController(self.app.ui, document_model, workspace_id="library")
    with contextlib.closing(document_controller):
        original_data = numpy.random.randn(4, 4, 3, 3)
        xdata = DataAndMetadata.new_data_and_metadata(original_data, data_descriptor=DataAndMetadata.DataDescriptor(False, 2, 2))
        data_item = DataItem.new_data_item(xdata)
        document_model.append_data_item(data_item)
        display_item = document_model.get_display_item_for_data_item(data_item)
        display_data_channel = display_item.display_data_channels[0]
        # index (3, 3) is valid for the 4x4 collection
        display_data_channel.collection_index = 3, 3
        displayed = display_data_channel.get_calculated_display_values(True).display_data_and_metadata.data
        self.assertTrue(numpy.array_equal(displayed, original_data[3, 3, ...]))
        # shrink the collection to 2x2; the stored index should clamp to (1, 1)
        smaller_data = numpy.random.randn(2, 2, 3, 3)
        smaller_xdata = DataAndMetadata.new_data_and_metadata(smaller_data, data_descriptor=DataAndMetadata.DataDescriptor(False, 2, 2))
        display_item.data_item.set_xdata(smaller_xdata)
        displayed = display_data_channel.get_calculated_display_values(True).display_data_and_metadata.data
        self.assertTrue(numpy.array_equal(displayed, smaller_data[1, 1, ...]))
def execute(self, **kwargs):
    """Run the processing component over the sources, storing the result in self.__xdata.

    When the component is scalar-valued or an explicit mapping is requested and
    the single source is a collection, the component is applied once per
    navigation index and the results are assembled into a collection result.
    Otherwise the component processes the full input in one call.
    """
    # let the processing component do the processing and store result in the xdata field.
    # TODO: handle multiple sources (broadcasting)
    is_mapped = self.processing_component.is_scalar or kwargs.get("mapping", "none") != "none"
    if is_mapped and len(self.processing_component.sources) == 1 and kwargs[self.processing_component.sources[0]["name"]].xdata.is_collection:
        src_name = self.processing_component.sources[0]["name"]
        data_source = typing.cast("Facade.DataSource", kwargs[src_name])
        xdata = data_source.xdata
        self.__xdata = None
        # apply the component to each navigation-index slice of the source
        for index in numpy.ndindex(xdata.navigation_dimension_shape):
            index_data_source = DataItem.DataSource(data_source._display_data_channel, data_source.graphic._graphic, xdata[index])
            # replace the first keyword argument with the per-index data source;
            # assumes the source is always the first kwarg -- TODO confirm against callers
            index_kw_args = {next(iter(kwargs.keys())): index_data_source}
            for k, v in list(kwargs.items())[1:]:
                index_kw_args[k] = v
            processed_data = self.processing_component.process(**index_kw_args)
            if isinstance(processed_data, DataAndMetadata.DataAndMetadata):
                # handle array data: allocate the result on first iteration, then fill per index
                index_xdata = processed_data
                if self.__xdata is None:
                    self.__data = numpy.empty(xdata.navigation_dimension_shape + index_xdata.datum_dimension_shape, dtype=index_xdata.data_dtype)
                    self.__xdata = DataAndMetadata.new_data_and_metadata(self.__data, index_xdata.intensity_calibration, tuple(xdata.navigation_dimensional_calibrations) + tuple(index_xdata.datum_dimensional_calibrations), None, None, DataAndMetadata.DataDescriptor(xdata.is_sequence, xdata.collection_dimension_count, index_xdata.datum_dimension_count))
                self.__data[index] = index_xdata.data
            elif isinstance(processed_data, DataAndMetadata.ScalarAndMetadata):
                # handle scalar data: result datum dimensions become the source's collection dimensions
                index_scalar = processed_data
                if self.__xdata is None:
                    self.__data = numpy.empty(xdata.navigation_dimension_shape, dtype=type(index_scalar.value))
                    self.__xdata = DataAndMetadata.new_data_and_metadata(self.__data, index_scalar.calibration, tuple(xdata.navigation_dimensional_calibrations), None, None, DataAndMetadata.DataDescriptor(xdata.is_sequence, 0, xdata.collection_dimension_count))
                self.__data[index] = index_scalar.value
    elif not self.processing_component.is_scalar:
        # non-mapped, non-scalar case: process everything in one call
        self.__xdata = self.processing_component.process(**kwargs)
def test_affine_transform_image_for_4d_data(self):
    """Affine transform computation handles 4d data in several descriptor layouts.

    For each descriptor, a rectangular region is marked in the datum (or
    collection, depending on layout) dimensions, the transform's control
    vectors are rotated 90 degrees, and the computed output is compared
    against numpy.rot90 applied over the matching axes.
    """
    data_descriptors = [DataAndMetadata.DataDescriptor(True, 1, 2), DataAndMetadata.DataDescriptor(False, 2, 2), DataAndMetadata.DataDescriptor(True, 2, 1)]
    for data_descriptor in data_descriptors:
        with self.subTest(data_descriptor=data_descriptor):
            with create_memory_profile_context() as profile_context:
                document_controller = profile_context.create_document_controller_with_application()
                document_model = document_controller.document_model
                data = numpy.zeros((5, 5, 5, 5))
                # mark an asymmetric region in the axes that will be rotated
                if data_descriptor.collection_dimension_count == 2 and not data_descriptor.is_sequence:
                    data[2:-2, 1:-1] = 1
                elif data_descriptor.collection_dimension_count == 2 and data_descriptor.is_sequence:
                    data[:, 2:-2, 1:-1] = 1
                else:
                    data[..., 2:-2, 1:-1] = 1
                xdata = DataAndMetadata.new_data_and_metadata(data, data_descriptor=data_descriptor)
                api = Facade.get_api("~1.0", "~1.0")
                data_item = api.library.create_data_item_from_data_and_metadata(xdata)
                document_controller.selection.set(0)
                document_controller.selected_display_panel = None  # use the document controller selection
                affine_transform = AffineTransformImage.AffineTransformMenuItem(api)
                affine_transform.menu_item_execute(api.application.document_controllers[0])
                document_controller.periodic()
                # Can't convince the computation to update when changing the graphics, so just check that it got executed
                vector_a = data_item.graphics[0]
                vector_b = data_item.graphics[1]
                # Rotate by 90 degrees
                vector_a.end = (0.75, 0.5)
                vector_b.end = (0.5, 0.75)
                # Update computation
                document_controller.periodic()
                DocumentModel.evaluate_data(document_model.computations[0])
                self.assertEqual(len(data_item.graphics), 2)
                self.assertEqual(api.library.data_item_count, 2)
                # the rotated axes depend on the data descriptor layout
                if data_descriptor.collection_dimension_count == 2 and not data_descriptor.is_sequence:
                    self.assertTrue(numpy.allclose(document_model.data_items[1].data, numpy.rot90(data)))
                elif data_descriptor.collection_dimension_count == 2 and data_descriptor.is_sequence:
                    self.assertTrue(numpy.allclose(document_model.data_items[1].data, numpy.rot90(data, axes=(1, 2))))
                else:
                    self.assertTrue(numpy.allclose(document_model.data_items[1].data, numpy.rot90(data, axes=(2, 3))))
def test_mapping_widget_to_image_on_3d_spectrum_image_uses_collection_dimensions(self):
    """Dragging a line profile over a 2d collection of spectra maps to the collection dimensions."""
    document_model = DocumentModel.DocumentModel()
    document_controller = DocumentController.DocumentController(self.app.ui, document_model, workspace_id="library")
    with contextlib.closing(document_controller):
        display_panel = document_controller.selected_display_panel
        display_panel.change_display_panel_content({"type": "image"})
        spectrum_image = DataAndMetadata.new_data_and_metadata(numpy.ones((10, 10, 50)), data_descriptor=DataAndMetadata.DataDescriptor(False, 2, 1))
        data_item = DataItem.new_data_item(spectrum_image)
        document_model.append_data_item(data_item)
        display_item = document_model.get_display_item_for_data_item(data_item)
        display_panel.set_display_panel_display_item(display_item)
        header_height = display_panel.header_canvas_item.header_height
        display_panel.root_container.layout_immediate((100 + header_height, 100))
        # drag a line profile; the resulting vector should be in collection coordinates
        document_controller.tool_mode = "line-profile"
        display_panel.display_canvas_item.simulate_drag((20, 25), (65, 85))
        self.assertEqual(display_item.graphics[0].vector, ((0.2, 0.25), (0.65, 0.85)))
def test_dimension_used_for_scale_marker_on_4d_diffraction_image_is_correct(self):
    """The scale marker of a 4d diffraction image uses the last datum dimension ("b")."""
    document_model = DocumentModel.DocumentModel()
    document_controller = DocumentController.DocumentController(self.app.ui, document_model, workspace_id="library")
    with contextlib.closing(document_controller):
        display_panel = document_controller.selected_display_panel
        calibrations = [Calibration.Calibration(units=unit) for unit in ("y", "x", "a", "b")]
        diffraction_xdata = DataAndMetadata.new_data_and_metadata(numpy.ones((10, 10, 50, 50)), dimensional_calibrations=calibrations, data_descriptor=DataAndMetadata.DataDescriptor(False, 2, 2))
        data_item = DataItem.new_data_item(diffraction_xdata)
        document_model.append_data_item(data_item)
        display_item = document_model.get_display_item_for_data_item(data_item)
        display_panel.set_display_panel_display_item(display_item)
        header_height = display_panel.header_canvas_item.header_height
        display_panel.root_container.layout_immediate((1000 + header_height, 1000))
        # the scale marker should be calibrated against the final datum dimension
        overlay = display_panel.display_canvas_item._info_overlay_canvas_item_for_test
        self.assertEqual(overlay._dimension_calibration_for_test.units, "b")
def __create_data_item(self, channel_name: str) -> DataItem.DataItem:
    """Create and register a large-format data item for a multi-acquire camera channel.

    Reserves HDF5-backed storage shaped (scan size + camera readout size), with a
    leading frame axis when multiple non-summed frames are acquired, and copies
    instrument/camera/scan metadata plus the multi-acquire settings into the item.
    """
    scan_calibrations = self.__grab_sync_info.scan_calibrations
    data_calibrations = self.__grab_sync_info.data_calibrations
    data_intensity_calibration = self.__grab_sync_info.data_intensity_calibration
    data_item = DataItem.DataItem(large_format=True)
    parameters = self.__multi_acquire_parameters[self.__current_parameters_index]
    data_item.title = f"{CameraDataChannel.title_base} ({channel_name}) #{self.__current_parameters_index+1}"
    self.__document_model.append_data_item(data_item)
    frames = parameters['frames']
    sum_frames = self.__multi_acquire_settings['sum_frames']
    scan_size = tuple(self.__grab_sync_info.scan_size)
    camera_readout_size = tuple(self.__grab_sync_info.camera_readout_size_squeezed)
    data_shape = scan_size + camera_readout_size
    # multiple non-summed frames add a leading sequence axis
    if frames > 1 and not sum_frames:
        data_shape = (frames,) + data_shape
    data_descriptor = DataAndMetadata.DataDescriptor(frames > 1 and not sum_frames, 2, len(camera_readout_size))
    data_item.reserve_data(data_shape=data_shape, data_dtype=numpy.dtype(numpy.float32), data_descriptor=data_descriptor)
    dimensional_calibrations = scan_calibrations + data_calibrations
    if frames > 1 and not sum_frames:
        # uncalibrated calibration for the sequence (frame) axis
        dimensional_calibrations = (Calibration.Calibration(),) + tuple(dimensional_calibrations)
    data_item.dimensional_calibrations = dimensional_calibrations
    data_item.intensity_calibration = data_intensity_calibration
    # deep-copy the source metadata so later changes don't leak into the item
    data_item_metadata = data_item.metadata
    data_item_metadata["instrument"] = copy.deepcopy(self.__grab_sync_info.instrument_metadata)
    data_item_metadata["hardware_source"] = copy.deepcopy(self.__grab_sync_info.camera_metadata)
    data_item_metadata["scan"] = copy.deepcopy(self.__grab_sync_info.scan_metadata)
    data_item_metadata["MultiAcquire.settings"] = copy.deepcopy(self.__multi_acquire_settings)
    data_item_metadata["MultiAcquire.parameters"] = copy.deepcopy(self.__multi_acquire_parameters[self.__current_parameters_index])
    data_item.metadata = data_item_metadata
    return data_item
def test_rgb_data_write_read_round_trip(self):
    """RGB (uint8, 3-channel) image data should survive a save/load cycle for both file versions."""
    for version in (3, 4):
        buffer = io.BytesIO()
        data_in = (numpy.random.randn(6, 4, 3) * 255).astype(numpy.uint8)
        descriptor_in = DataAndMetadata.DataDescriptor(False, 0, 2)
        calibrations_in = [Calibration.Calibration(1, 2, "nm"), Calibration.Calibration(2, 3, u"µm")]
        intensity_in = Calibration.Calibration(4, 5, "six")
        metadata_in = {"abc": None, "": "", "one": [], "two": {}, "three": [1, None, 2]}
        xdata_in = DataAndMetadata.new_data_and_metadata(data_in, data_descriptor=descriptor_in, dimensional_calibrations=calibrations_in, intensity_calibration=intensity_in, metadata=metadata_in)
        dm3_image_utils.save_image(xdata_in, buffer, version)
        buffer.seek(0)
        xdata_out = dm3_image_utils.load_image(buffer)
        # data and descriptor must round-trip exactly
        self.assertTrue(numpy.array_equal(data_in, xdata_out.data))
        self.assertEqual(descriptor_in, xdata_out.data_descriptor)
def test_data_slice_of_sequence_handles_calibrations(self):
    """Indexing a sequence drops the sequence axis and shifts the dimensional calibrations down."""
    data = numpy.zeros((10, 100, 100), dtype=numpy.float32)
    intensity = Calibration.Calibration(0.1, 0.2, "I")
    dims = [Calibration.Calibration(0.11, 0.22, "S"), Calibration.Calibration(0.11, 0.22, "A"), Calibration.Calibration(0.111, 0.222, "B")]
    xdata = DataAndMetadata.new_data_and_metadata(data, intensity_calibration=intensity, dimensional_calibrations=dims, data_descriptor=DataAndMetadata.DataDescriptor(True, 0, 2))
    sliced = xdata[3]
    # a scalar index removes the sequence dimension; a range slice keeps it
    self.assertFalse(sliced.is_sequence)
    self.assertTrue(xdata[3:4].is_sequence)
    # intensity calibration is unchanged by slicing
    self.assertAlmostEqual(sliced.intensity_calibration.offset, xdata.intensity_calibration.offset)
    self.assertAlmostEqual(sliced.intensity_calibration.scale, xdata.intensity_calibration.scale)
    self.assertEqual(sliced.intensity_calibration.units, xdata.intensity_calibration.units)
    # sliced dimension k corresponds to original dimension k + 1
    for out_index, in_index in ((0, 1), (1, 2)):
        self.assertAlmostEqual(sliced.dimensional_calibrations[out_index].offset, xdata.dimensional_calibrations[in_index].offset)
        self.assertAlmostEqual(sliced.dimensional_calibrations[out_index].scale, xdata.dimensional_calibrations[in_index].scale)
        self.assertEqual(sliced.dimensional_calibrations[out_index].units, xdata.dimensional_calibrations[in_index].units)
def test_metadata_write_read_round_trip(self):
    """Nested metadata should survive a save/load cycle.

    Checks both supported file versions (3 and 4), consistent with the other
    round-trip tests in this file; previously only the writer's default
    version was exercised.
    """
    for version in (3, 4):
        s = io.BytesIO()
        data_in = numpy.ones((6, 4), numpy.float32)
        data_descriptor_in = DataAndMetadata.DataDescriptor(False, 0, 2)
        dimensional_calibrations_in = [Calibration.Calibration(1, 2, "nm"), Calibration.Calibration(2, 3, u"µm")]
        intensity_calibration_in = Calibration.Calibration(4, 5, "six")
        metadata_in = {"abc": 1, "def": "abc", "efg": {"one": 1, "two": "TWO", "three": [3, 4, 5]}}
        xdata_in = DataAndMetadata.new_data_and_metadata(data_in, data_descriptor=data_descriptor_in, dimensional_calibrations=dimensional_calibrations_in, intensity_calibration=intensity_calibration_in, metadata=metadata_in)
        dm3_image_utils.save_image(xdata_in, s, version)
        s.seek(0)
        xdata = dm3_image_utils.load_image(s)
        self.assertEqual(metadata_in, xdata.metadata)
def test_data_timestamp_write_read_round_trip(self):
    """Timestamp, timezone, and timezone offset should survive a save/load cycle."""
    for version in (3, 4):
        stream = io.BytesIO()
        data_in = numpy.ones((6, 4), numpy.float32)
        descriptor_in = DataAndMetadata.DataDescriptor(False, 0, 2)
        dims_in = [Calibration.Calibration(1.1, 2.1, "nm"), Calibration.Calibration(2, 3, u"µm")]
        intensity_in = Calibration.Calibration(4.4, 5.5, "six")
        timestamp_in = datetime.datetime(2013, 11, 18, 14, 5, 4, 0)
        timezone_in = "America/Los_Angeles"
        timezone_offset_in = "-0700"
        xdata_in = DataAndMetadata.new_data_and_metadata(data_in, data_descriptor=descriptor_in, dimensional_calibrations=dims_in, intensity_calibration=intensity_in, metadata=dict(), timestamp=timestamp_in, timezone=timezone_in, timezone_offset=timezone_offset_in)
        dm3_image_utils.save_image(xdata_in, stream, version)
        stream.seek(0)
        xdata_out = dm3_image_utils.load_image(stream)
        # all three time-related fields must round-trip
        self.assertEqual(timestamp_in, xdata_out.timestamp)
        self.assertEqual(timezone_in, xdata_out.timezone)
        self.assertEqual(timezone_offset_in, xdata_out.timezone_offset)
def test_signal_type_round_trip(self):
    """An EELS signal type survives save/load and is mirrored into the 'Meta Data' section."""
    for version in (3, 4):
        stream = io.BytesIO()
        xdata_in = DataAndMetadata.new_data_and_metadata(
            numpy.ones((12, ), numpy.float32),
            data_descriptor=DataAndMetadata.DataDescriptor(False, 0, 1),
            dimensional_calibrations=[Calibration.Calibration(1, 2, "eV")],
            intensity_calibration=Calibration.Calibration(4, 5, "e"),
            metadata={"hardware_source": {"signal_type": "EELS"}})
        dm3_image_utils.save_image(xdata_in, stream, version)
        stream.seek(0)
        xdata_out = dm3_image_utils.load_image(stream)
        # the loader adds the DM-style 'Meta Data' block alongside the original metadata
        metadata_expected = {'hardware_source': {'signal_type': 'EELS'}, 'Meta Data': {'Format': 'Spectrum', 'Signal': 'EELS'}}
        self.assertEqual(metadata_expected, xdata_out.metadata)
def __fit_zero_loss_peak(self, spectrum_xdata: DataAndMetadata.DataAndMetadata) -> DataAndMetadata.DataAndMetadata:
    """Fit the zero loss peak of a spectrum or a navigable collection of spectra.

    Returns model xdata with the same shape, data descriptor, and calibrations
    as the input spectrum data.
    """
    # channel index corresponding to 0.0 calibrated energy on the energy axis
    z = int(spectrum_xdata.dimensional_calibrations[-1].convert_from_calibrated_value(0.0))
    calibration = copy.deepcopy(spectrum_xdata.datum_dimensional_calibrations[0])
    ys = spectrum_xdata.data
    if spectrum_xdata.is_navigable:
        calibrations = list(copy.deepcopy(spectrum_xdata.navigation_dimensional_calibrations)) + [calibration]
        # flatten the navigation dimensions so the fits run over a 2d (spectra x channels) array.
        # numpy.prod replaces numpy.product, which was deprecated and removed in NumPy 2.0.
        yss = numpy.reshape(ys, (int(numpy.prod(ys.shape[:-1])), ) + (ys.shape[-1], ))
        fit_data = self._perform_fits(yss, z)
        data_descriptor = DataAndMetadata.DataDescriptor(False, spectrum_xdata.navigation_dimension_count, spectrum_xdata.datum_dimension_count)
        # restore the original navigation shape for the model output
        model_xdata = DataAndMetadata.new_data_and_metadata(
            numpy.reshape(fit_data, ys.shape[:-1] + (ys.shape[-1], )),
            data_descriptor=data_descriptor,
            dimensional_calibrations=calibrations,
            intensity_calibration=spectrum_xdata.intensity_calibration)
    else:
        poly_data = self._perform_fit(ys, z)
        model_xdata = DataAndMetadata.new_data_and_metadata(
            poly_data,
            dimensional_calibrations=[calibration],
            intensity_calibration=spectrum_xdata.intensity_calibration)
    return model_xdata
def test_changing_sequence_index_updates_display_range(self):
    """Changing the sequence index recomputes display and data ranges from the current frame."""
    with TestContext.create_memory_context() as test_context:
        document_model = test_context.create_document_model()
        data = numpy.zeros((3, 8, 8))
        data[1, ...] = 1
        data[2, ...] = 2
        xdata = DataAndMetadata.new_data_and_metadata(data, data_descriptor=DataAndMetadata.DataDescriptor(True, 0, 2))
        data_item = DataItem.new_data_item(xdata)
        document_model.append_data_item(data_item)
        display_item = document_model.get_display_item_for_data_item(data_item)
        channel = display_item.display_data_channels[0]
        # frame 0 is all zeros
        self.assertEqual(channel.get_calculated_display_values(True).display_range, (0, 0))
        self.assertEqual(channel.get_calculated_display_values(True).data_range, (0, 0))
        # frame 1 is all ones; both ranges should follow
        channel.sequence_index = 1
        self.assertEqual(channel.get_calculated_display_values(True).display_range, (1, 1))
        self.assertEqual(channel.get_calculated_display_values(True).data_range, (1, 1))
def create_data_item():
    """Create a library data item from the captured scan xdata.

    When acquiring multiple non-summed frames, the scan data is first promoted
    to a sequence by prepending a frame axis with a blank calibration.
    Signals data_item_ready_event when done -- presumably another thread waits
    on it; verify against the caller.
    """
    nonlocal new_data_item, scan_xdata
    if number_frames > 1 and not scan_data_dict['settings']['sum_frames']:
        # promote to a sequence: new leading axis plus an uncalibrated sequence calibration
        data = scan_xdata.data[numpy.newaxis, ...]
        dimensional_calibrations = [Calibration.Calibration()] + list(scan_xdata.dimensional_calibrations)
        data_descriptor = DataAndMetadata.DataDescriptor(True, scan_xdata.data_descriptor.collection_dimension_count, scan_xdata.data_descriptor.datum_dimension_count)
        scan_xdata = DataAndMetadata.new_data_and_metadata(data, intensity_calibration=scan_xdata.intensity_calibration, dimensional_calibrations=dimensional_calibrations, metadata=scan_xdata.metadata, timestamp=scan_xdata.timestamp, data_descriptor=data_descriptor, timezone=scan_xdata.timezone, timezone_offset=scan_xdata.timezone_offset)
    new_data_item = self.__api.library.create_data_item_from_data_and_metadata(scan_xdata, title=title)
    data_item_ready_event.set()
def __create_data_item(self, channel_name: str, grab_sync_info: scan_base.ScanHardwareSource.GrabSynchronizedInfo) -> DataItem.DataItem:
    """Create and register a data item for a synchronized-grab channel.

    Chooses HDF5-backed (large format) storage when the total data size exceeds
    the threshold, and in that case pre-reserves the storage using a data
    descriptor derived from the axes descriptor.
    """
    scan_calibrations = grab_sync_info.scan_calibrations
    data_calibrations = grab_sync_info.data_calibrations
    data_intensity_calibration = grab_sync_info.data_intensity_calibration
    _, data_shape = self.__calculate_axes_order_and_data_shape(grab_sync_info.axes_descriptor, grab_sync_info.scan_size, grab_sync_info.camera_readout_size_squeezed)
    # threshold: more than 10 full 2048x2048 float frames goes to HDF5-backed storage
    large_format = numpy.prod(data_shape, dtype=numpy.int64) > 2048**2 * 10
    data_item = DataItem.DataItem(large_format=large_format)
    data_item.title = f"{title_base} ({channel_name})"
    self.__document_model.append_data_item(data_item)
    # Only call "reserve_data" for HDF5 backed data items because it causes problems for ndata backed items.
    if large_format:
        axes_descriptor = grab_sync_info.axes_descriptor
        is_sequence = axes_descriptor.sequence_axes is not None
        collection_dimension_count = len(axes_descriptor.collection_axes) if axes_descriptor.collection_axes is not None else 0
        datum_dimension_count = len(axes_descriptor.data_axes) if axes_descriptor.data_axes is not None else 0
        data_descriptor = DataAndMetadata.DataDescriptor(is_sequence, collection_dimension_count, datum_dimension_count)
        data_item.reserve_data(data_shape=data_shape, data_dtype=numpy.dtype(numpy.float32), data_descriptor=data_descriptor)
    data_item.dimensional_calibrations = scan_calibrations + data_calibrations
    data_item.intensity_calibration = data_intensity_calibration
    # deep-copy source metadata so later changes don't leak into the item
    data_item_metadata = data_item.metadata
    data_item_metadata["instrument"] = copy.deepcopy(grab_sync_info.instrument_metadata)
    data_item_metadata["hardware_source"] = copy.deepcopy(grab_sync_info.camera_metadata)
    data_item_metadata["scan"] = copy.deepcopy(grab_sync_info.scan_metadata)
    data_item.metadata = data_item_metadata
    return data_item
def test_exception_during_calculate_display_values_recovers_gracefully(self):
    """Repeated display-value requests around an index change must not wedge the display pipeline."""
    with TestContext.create_memory_context() as test_context:
        document_controller = test_context.create_document_controller()
        document_model = document_controller.document_model
        source_data = numpy.random.randn(4, 4, 3, 3)
        xdata = DataAndMetadata.new_data_and_metadata(source_data, data_descriptor=DataAndMetadata.DataDescriptor(False, 2, 2))
        data_item = DataItem.new_data_item(xdata)
        document_model.append_data_item(data_item)
        display_item = document_model.get_display_item_for_data_item(data_item)

        def next_calculated_display_values():
            pass

        display_data_channel = display_item.display_data_channels[0]
        listener = display_data_channel.add_calculated_display_values_listener(next_calculated_display_values)
        with contextlib.closing(listener):
            display_data = display_data_channel.get_calculated_display_values(True).display_data_and_metadata.data
            # now run the test: changing the index triggers the background recompute thread
            display_data_channel.collection_index = 2, 2
            display_data = display_data_channel.get_calculated_display_values(True).display_data_and_metadata.data
            # setting the same index again should be harmless
            display_data_channel.collection_index = 2, 2
            display_data = display_data_channel.get_calculated_display_values(True).display_data_and_metadata.data
            self.assertTrue(numpy.array_equal(display_data, source_data[2, 2, ...]))
def test_metadata_difficult_types_write_read_round_trip(self):
    """Metadata with None, empty keys, and empty containers is sanitized but not lost on save/load."""
    for version in (3, 4):
        stream = io.BytesIO()
        xdata_in = DataAndMetadata.new_data_and_metadata(
            numpy.ones((6, 4), numpy.float32),
            data_descriptor=DataAndMetadata.DataDescriptor(False, 0, 2),
            dimensional_calibrations=[Calibration.Calibration(1, 2, "nm"), Calibration.Calibration(2, 3, u"µm")],
            intensity_calibration=Calibration.Calibration(4, 5, "six"),
            metadata={"abc": None, "": "", "one": [], "two": {}, "three": [1, None, 2]})
        dm3_image_utils.save_image(xdata_in, stream, version)
        stream.seek(0)
        xdata_out = dm3_image_utils.load_image(stream)
        # None values and empty-string keys are dropped; empty containers survive
        metadata_expected = {"one": [], "two": {}, "three": [1, 2]}
        self.assertEqual(metadata_expected, xdata_out.metadata)
def update(self, data_and_metadata_list: typing.Sequence[DataAndMetadata.DataAndMetadata], state: str, view_id) -> None:
    """Write the latest partial acquisition data into the corresponding data items.

    For multi-frame acquisition the target items carry a leading frame axis
    (unless frames are summed, in which case incoming data is accumulated into
    the existing data).
    """
    frames = self.__multi_acquire_parameters[self.__current_parameters_index]['frames']
    sum_frames = self.__multi_acquire_settings['sum_frames']
    for i, data_and_metadata in enumerate(data_and_metadata_list):
        data_item = self.__data_items[i]
        scan_shape = data_and_metadata.data_shape
        data_shape_and_dtype = (tuple(scan_shape), data_and_metadata.data_dtype)
        data_descriptor = DataAndMetadata.DataDescriptor(frames > 1 and not sum_frames, 0, len(tuple(scan_shape)))
        dimensional_calibrations = data_and_metadata.dimensional_calibrations
        if frames > 1 and not sum_frames:
            # prepend the (uncalibrated) frame axis to calibrations and shape
            dimensional_calibrations = (Calibration.Calibration(),) + tuple(dimensional_calibrations)
            data_shape = data_shape_and_dtype[0]
            data_shape = (frames,) + data_shape
            data_shape_and_dtype = (data_shape, data_shape_and_dtype[1])
        intensity_calibration = data_and_metadata.intensity_calibration
        metadata = data_and_metadata.metadata
        metadata["MultiAcquire.settings"] = copy.deepcopy(self.__multi_acquire_settings)
        metadata["MultiAcquire.parameters"] = copy.deepcopy(self.__multi_acquire_parameters[self.__current_parameters_index])
        # bug fix: pass the augmented metadata dict here. previously this passed
        # data_and_metadata.metadata, which drops the MultiAcquire keys whenever
        # the metadata property returns a copy.
        data_metadata = DataAndMetadata.DataMetadata(data_shape_and_dtype, intensity_calibration, dimensional_calibrations, metadata=metadata, data_descriptor=data_descriptor)
        src_slice = (Ellipsis,)
        dst_slice = (Ellipsis,)
        if frames > 1:
            if sum_frames:
                # accumulate the incoming frame into the existing data
                existing_data = data_item.data
                if existing_data is not None:
                    summed_data = existing_data[dst_slice] + data_and_metadata.data[src_slice]
                    data_and_metadata._set_data(summed_data)
            else:
                # write into the slot for the current frame
                dst_slice = (self.current_frames_index,) + dst_slice  # type: ignore
        self.__document_model.update_data_item_partial(data_item, data_metadata, data_and_metadata, src_slice, dst_slice)
def test_cursor_over_2d_data_sequence_displays_correct_ordering_of_indices(self):
    """The info panel lists cursor position as (x, y, sequence index) for a 2d sequence."""
    document_model = DocumentModel.DocumentModel()
    document_controller = DocumentController.DocumentController(self.app.ui, document_model, workspace_id="library")
    with contextlib.closing(document_controller):
        display_panel = document_controller.selected_display_panel
        sequence_xdata = DataAndMetadata.new_data_and_metadata(numpy.ones((20, 100, 100), numpy.float64), data_descriptor=DataAndMetadata.DataDescriptor(True, 0, 2))
        data_item = DataItem.new_data_item(sequence_xdata)
        document_model.append_data_item(data_item)
        display_item = document_model.get_display_item_for_data_item(data_item)
        display_item.display_data_channels[0].sequence_index = 4
        display_item.calibration_style_id = "pixels-top-left"
        display_panel.set_display_panel_display_item(display_item)
        header_height = display_panel.header_canvas_item.header_height
        info_panel = document_controller.find_dock_panel("info-panel")
        display_panel.root_container.layout_immediate((1000 + header_height, 1000))
        # move the cursor to the center and let the UI catch up
        display_panel.display_canvas_item.mouse_entered()
        document_controller.periodic()
        display_panel.display_canvas_item.mouse_position_changed(500, 500, Graphics.NullModifiers())
        document_controller.periodic()
        self.assertEqual(info_panel.label_row_1.text, "Position: 50.0, 50.0, 4.0")
        self.assertEqual(info_panel.label_row_2.text, "Value: 1")
        self.assertIsNone(info_panel.label_row_3.text, None)
        display_panel.display_canvas_item.mouse_exited()
def test_dimension_used_for_scale_marker_on_2d_data_stack_is_correct(self):
    """The scale marker of a 2d data stack uses the last datum dimension ("x"), not the sequence axis."""
    with TestContext.create_memory_context() as test_context:
        document_controller = test_context.create_document_controller()
        document_model = document_controller.document_model
        display_panel = document_controller.selected_display_panel
        calibrations = [Calibration.Calibration(units=unit) for unit in ("s", "y", "x")]
        stack_xdata = DataAndMetadata.new_data_and_metadata(numpy.ones((50, 10, 10)), dimensional_calibrations=calibrations, data_descriptor=DataAndMetadata.DataDescriptor(True, 0, 2))
        data_item = DataItem.new_data_item(stack_xdata)
        document_model.append_data_item(data_item)
        display_item = document_model.get_display_item_for_data_item(data_item)
        display_panel.set_display_panel_display_item(display_item)
        header_height = display_panel.header_canvas_item.header_height
        display_panel.root_container.layout_immediate((1000 + header_height, 1000))
        # the scale marker should be calibrated against the final datum dimension
        overlay = display_panel.display_canvas_item._info_overlay_canvas_item_for_test
        self.assertEqual(overlay._dimension_calibration_for_test.units, "x")
def test_display_data_is_1d_for_collection_of_1d_datum(self):
    """A 1d collection of 1d data displays as a single 1d slice. See #529."""
    with TestContext.create_memory_context() as test_context:
        document_controller = test_context.create_document_controller()
        document_model = document_controller.document_model
        collection_xdata = DataAndMetadata.new_data_and_metadata(numpy.ones((2, 8), float), data_descriptor=DataAndMetadata.DataDescriptor(False, 1, 1))
        data_item = DataItem.new_data_item(collection_xdata)
        document_model.append_data_item(data_item)
        display_item = document_model.get_display_item_for_data_item(data_item)
        channel = display_item.display_data_channels[0]
        displayed = channel.get_calculated_display_values(True).display_data_and_metadata
        # the displayed slice drops the collection dimension and keeps the dtype
        self.assertEqual((8, ), displayed.data_shape)
        self.assertEqual(float, displayed.data_dtype)
        self.assertEqual((8, ), channel.display_data_shape)
        self.assertEqual(1, len(channel.get_datum_calibrations(data_item.dimensional_calibrations)))
def signal_to_xdata(signal: hyperspy.signals.BaseSignal) -> DataAndMetadata.DataAndMetadata:
    """Convert a hyperspy signal into nion extended data, mapping axes to calibrations."""
    all_axes = signal.axes_manager.navigation_axes + signal.axes_manager.signal_axes
    dimensional_calibrations = [Calibration.Calibration(axis.offset, axis.scale, axis.units) for axis in all_axes]
    navigation_count = len(signal.axes_manager.navigation_axes)
    signal_count = len(signal.axes_manager.signal_axes)
    if signal_count == 0:
        # no signal axes: treat the navigation axes as the datum dimensions
        data_descriptor = DataAndMetadata.DataDescriptor(False, 0, navigation_count)
    else:
        data_descriptor = DataAndMetadata.DataDescriptor(False, navigation_count, signal_count)
    return DataAndMetadata.new_data_and_metadata(signal.data, dimensional_calibrations=dimensional_calibrations, data_descriptor=data_descriptor)
def test_reference_images_load_properly(self):
    """Bundled reference dm3/dm4 files load with correct data, calibrations, and descriptors.

    Covers all supported shape/descriptor combinations across file versions 3 and 4.
    Uses numpy.prod (numpy.product was deprecated and removed in NumPy 2.0).
    """
    shape_data_descriptors = (
        ((3, ), DataAndMetadata.DataDescriptor(False, 0, 1)),  # spectrum
        ((3, 2), DataAndMetadata.DataDescriptor(False, 1, 1)),  # 1d collection of spectra
        ((3, 4, 5), DataAndMetadata.DataDescriptor(False, 2, 1)),  # 2d collection of spectra
        ((3, 2), DataAndMetadata.DataDescriptor(True, 0, 1)),  # sequence of spectra
        ((3, 2), DataAndMetadata.DataDescriptor(False, 0, 2)),  # image
        ((4, 3, 2), DataAndMetadata.DataDescriptor(False, 1, 2)),  # 1d collection of images
        ((3, 4, 5), DataAndMetadata.DataDescriptor(True, 0, 2)),  # sequence of images
    )
    for (shape, data_descriptor), version in itertools.product(shape_data_descriptors, (3, 4)):
        dimensional_calibrations = list()
        for index, dimension in enumerate(shape):
            dimensional_calibrations.append(Calibration.Calibration(1.0 + 0.1 * index, 2.0 + 0.2 * index, "µ" + "n" * index))
        intensity_calibration = Calibration.Calibration(4, 5, "six")
        data = numpy.arange(numpy.prod(shape), dtype=numpy.float32).reshape(shape)
        name = f"ref_{'T' if data_descriptor.is_sequence else 'F'}_{data_descriptor.collection_dimension_count}_{data_descriptor.datum_dimension_count}.dm{version}"
        # to regenerate the reference files, uncomment:
        # import pathlib
        # xdata = DataAndMetadata.new_data_and_metadata(data, dimensional_calibrations=dimensional_calibrations, intensity_calibration=intensity_calibration, data_descriptor=data_descriptor)
        # file_path = pathlib.Path(__file__).parent / "resources" / name
        # with file_path.open('wb') as f:
        #     dm3_image_utils.save_image(xdata, f, version)
        try:
            s = io.BytesIO(pkgutil.get_data(__name__, f"resources/{name}"))
            xdata = dm3_image_utils.load_image(s)
            self.assertAlmostEqual(intensity_calibration.scale, xdata.intensity_calibration.scale, 6)
            self.assertAlmostEqual(intensity_calibration.offset, xdata.intensity_calibration.offset, 6)
            self.assertEqual(intensity_calibration.units, xdata.intensity_calibration.units)
            for c1, c2 in zip(dimensional_calibrations, xdata.dimensional_calibrations):
                self.assertAlmostEqual(c1.scale, c2.scale, 6)
                self.assertAlmostEqual(c1.offset, c2.offset, 6)
                self.assertEqual(c1.units, c2.units)
            self.assertEqual(data_descriptor, xdata.data_descriptor)
            self.assertTrue(numpy.array_equal(data, xdata.data))
            # print(f"{name} {data_descriptor} PASS")
        except Exception as e:
            # identify which reference file failed before re-raising
            print(f"{name} {data_descriptor} FAIL")
            raise
def test_sequence_index_validates_when_data_changes(self):
    """The sequence index clamps to valid bounds after the underlying data shrinks."""
    with TestContext.create_memory_context() as test_context:
        document_controller = test_context.create_document_controller()
        document_model = document_controller.document_model
        original_data = numpy.random.randn(4, 3, 3)
        xdata = DataAndMetadata.new_data_and_metadata(original_data, data_descriptor=DataAndMetadata.DataDescriptor(True, 0, 2))
        data_item = DataItem.new_data_item(xdata)
        document_model.append_data_item(data_item)
        display_item = document_model.get_display_item_for_data_item(data_item)
        display_data_channel = display_item.display_data_channels[0]
        # index 3 is valid for a 4-frame sequence
        display_data_channel.sequence_index = 3
        displayed = display_data_channel.get_calculated_display_values(True).display_data_and_metadata.data
        self.assertTrue(numpy.array_equal(displayed, original_data[3, ...]))
        # shrink to 2 frames; the stored index should clamp to 1
        smaller_data = numpy.random.randn(2, 3, 3)
        smaller_xdata = DataAndMetadata.new_data_and_metadata(smaller_data, data_descriptor=DataAndMetadata.DataDescriptor(True, 0, 2))
        display_item.data_item.set_xdata(smaller_xdata)
        displayed = display_data_channel.get_calculated_display_values(True).display_data_and_metadata.data
        self.assertTrue(numpy.array_equal(displayed, smaller_data[1, ...]))
def test_cursor_over_1d_multiple_data_displays_without_exception(self):
    """Cursor position/value text for a 1d collection of 1d data is produced
    without raising and reports pixel coordinates."""
    with TestContext.create_memory_context() as test_context:
        document_model = test_context.create_document_model()
        collection_xdata = DataAndMetadata.new_data_and_metadata(numpy.zeros((4, 1000), numpy.float64), data_descriptor=DataAndMetadata.DataDescriptor(False, 1, 1))
        data_item = DataItem.new_data_item(collection_xdata)
        document_model.append_data_item(data_item)
        display_item = document_model.get_display_item_for_data_item(data_item)
        display_item.calibration_style_id = "pixels-top-left"
        position_text, value_text = display_item.get_value_and_position_text((500, ))
        self.assertEqual("500.0, 0.0", position_text)
        self.assertEqual("0", value_text)
def test_create_data_item_from_data_as_sequence(self):
    """Creating a data item from sequence xdata via the Facade API should add
    one library item whose underlying data item is a sequence."""
    with TestContext.create_memory_context() as test_context:
        document_controller = test_context.create_document_controller_with_application()
        document_model = document_controller.document_model
        library = Facade.get_api("~1.0", "~1.0").library
        sequence_xdata = DataAndMetadata.new_data_and_metadata(numpy.zeros((8, 4, 5)), data_descriptor=DataAndMetadata.DataDescriptor(True, 0, 2))
        library.create_data_item_from_data_and_metadata(sequence_xdata, "three")
        self.assertEqual(1, library.data_item_count)
        self.assertTrue(document_model.data_items[0].is_sequence)
def test_cursor_over_1d_image_without_exception_x(self):
    """Cursor position/value text over a 1d collection displayed as an image is
    produced without raising."""
    with TestContext.create_memory_context() as test_context:
        document_model = test_context.create_document_model()
        collection_xdata = DataAndMetadata.new_data_and_metadata(numpy.zeros((4, 25)), data_descriptor=DataAndMetadata.DataDescriptor(False, 1, 1))
        data_item = DataItem.new_data_item(collection_xdata)
        document_model.append_data_item(data_item)
        display_item = document_model.get_display_item_for_data_item(data_item)
        # force image display for the 1d collection
        display_item.display_type = "image"
        position_text, value_text = display_item.get_value_and_position_text((2, 20))
        self.assertEqual("20.0, 2.0", position_text)
        self.assertEqual("0", value_text)
def test_cursor_over_1d_multiple_data_displays_without_exception(self):
    """Cursor position/value text for a 1d collection of 1d data, queried via
    the display data channel, is produced without raising."""
    document_model = DocumentModel.DocumentModel()
    with contextlib.closing(document_model):
        collection_xdata = DataAndMetadata.new_data_and_metadata(numpy.zeros((4, 1000), numpy.float64), data_descriptor=DataAndMetadata.DataDescriptor(False, 1, 1))
        data_item = DataItem.new_data_item(collection_xdata)
        document_model.append_data_item(data_item)
        display_item = document_model.get_display_item_for_data_item(data_item)
        display_item.calibration_style_id = "pixels-top-left"
        position_text, value_text = display_item.get_value_and_position_text(display_item.display_data_channel, (500, ))
        self.assertEqual("500.0, 0.0", position_text)
        self.assertEqual("0", value_text)
def update(self, data_and_metadata: DataAndMetadata.DataAndMetadata, state: str, scan_shape: Geometry.IntSize, dest_sub_area: Geometry.IntRect, sub_area: Geometry.IntRect, view_id) -> None:
    """Merge a partial frame of acquired data into the target data item.

    Builds the destination data metadata (shape, calibrations, descriptor)
    from the multi-acquire settings/parameters and forwards the partial data
    plus source/destination slices to the document model.
    """
    # there are a few techniques for getting data into a data item. this method prefers directly calling the
    # document model method update_data_item_partial, which is thread safe. if that method is not available, it
    # falls back to the data item method set_data_and_metadata, which must be called from the main thread.
    # the hardware source also supplies a data channel which is thread safe and ends up calling set_data_and_metadata
    # but we skip that so that the updates fit into this class instead.
    frames = self.__multi_acquire_parameters[self.__current_parameters_index]['frames']
    sum_frames = self.__multi_acquire_settings['sum_frames']
    # the first len(scan_shape) axes of the incoming data are the scan (collection) axes.
    collection_rank = len(tuple(scan_shape))
    data_shape_and_dtype = (tuple(scan_shape) + data_and_metadata.data_shape[collection_rank:], data_and_metadata.data_dtype)
    # sequence flag is set only when keeping individual frames (frames > 1 and not summing).
    data_descriptor = DataAndMetadata.DataDescriptor(frames > 1 and not sum_frames, collection_rank, len(data_and_metadata.data_shape) - collection_rank)
    dimensional_calibrations = data_and_metadata.dimensional_calibrations
    if frames > 1 and not sum_frames:
        # keeping individual frames: prepend an uncalibrated frame axis to both
        # the calibrations and the destination shape.
        dimensional_calibrations = (Calibration.Calibration(),) + tuple(dimensional_calibrations)
        data_shape = data_shape_and_dtype[0]
        data_shape = (frames,) + data_shape
        data_shape_and_dtype = (data_shape, data_shape_and_dtype[1])
    intensity_calibration = data_and_metadata.intensity_calibration
    if self.__multi_acquire_settings['use_multi_eels_calibration']:
        # derive an e/eV/s intensity scale from the camera's hardware_source metadata.
        # NOTE(review): missing counts_per_electron defaults to 1 and missing exposure
        # falls back to the configured exposure_ms — presumably best-effort; confirm.
        metadata = data_and_metadata.metadata.get('hardware_source', {})
        counts_per_electron = metadata.get('counts_per_electron', 1)
        exposure_s = metadata.get('exposure', self.__multi_acquire_parameters[self.__current_parameters_index]['exposure_ms']*0.001)
        _number_frames = 1 if not sum_frames else frames
        intensity_scale = (data_and_metadata.intensity_calibration.scale / counts_per_electron /
                           data_and_metadata.dimensional_calibrations[-1].scale / exposure_s / _number_frames)
        intensity_calibration = Calibration.Calibration(scale=intensity_scale)
    # record the acquisition settings/parameters in the metadata. NOTE(review): this
    # mutates the object returned by data_and_metadata.metadata; the DataMetadata below
    # is constructed from data_and_metadata.metadata again — presumably the same
    # (now-updated) object; confirm the metadata property does not return a fresh copy.
    metadata = data_and_metadata.metadata
    metadata["MultiAcquire.settings"] = copy.deepcopy(self.__multi_acquire_settings)
    metadata["MultiAcquire.parameters"] = copy.deepcopy(self.__multi_acquire_parameters[self.__current_parameters_index])
    data_metadata = DataAndMetadata.DataMetadata(data_shape_and_dtype, intensity_calibration, dimensional_calibrations, metadata=data_and_metadata.metadata, data_descriptor=data_descriptor)
    # slices describing where the partial data comes from and where it lands.
    src_slice = sub_area.slice + (Ellipsis,)
    dst_slice = dest_sub_area.slice + (Ellipsis,)
    if frames > 1:
        if sum_frames:
            # accumulate this frame's sub-area into the existing data in place.
            existing_data = self.__data_item.data
            if existing_data is not None:
                existing_data[dst_slice] += data_and_metadata.data[src_slice]
        else:
            # route the partial data into the current frame's slot along the frame axis.
            dst_slice = (self.current_frames_index,) + dst_slice  # type: ignore
    self.__document_model.update_data_item_partial(self.__data_item, data_metadata, data_and_metadata, src_slice, dst_slice)
    self.update_progress(dest_sub_area.bottom)
def update(self, data_and_metadata: DataAndMetadata.DataAndMetadata, state: str, scan_shape: Geometry.IntSize, dest_sub_area: Geometry.IntRect, sub_area: Geometry.IntRect, view_id) -> None:
    """Re-order the axes of partially-acquired collection data and forward it
    to the document model as a partial data item update.

    This method is always called with a collection of 1d or 2d data. Axes are
    re-ordered per the axes descriptor, a leading length-1 sequence axis is
    removed, and calibrations/descriptor/shape are updated accordingly.
    """
    axes_descriptor = self.__grab_sync_info.axes_descriptor
    dimensional_calibrations = data_and_metadata.dimensional_calibrations
    data = data_and_metadata.data
    # determine the permutation of axes and the final (full) destination shape;
    # the trailing data.shape elements past the scan axes are the datum axes.
    axes_order, data_shape = self.__calculate_axes_order_and_data_shape(axes_descriptor, scan_shape, data.shape[len(tuple(scan_shape)):])
    assert len(axes_order) == data.ndim
    # apply the permutation to the data and, in lockstep, to the calibrations.
    data = numpy.moveaxis(data, axes_order, list(range(data.ndim)))
    dimensional_calibrations = numpy.array(dimensional_calibrations)[axes_order].tolist()
    is_sequence = axes_descriptor.sequence_axes is not None
    collection_dimension_count = len(axes_descriptor.collection_axes) if axes_descriptor.collection_axes is not None else 0
    datum_dimension_count = len(axes_descriptor.data_axes) if axes_descriptor.data_axes is not None else 0
    # build source/destination slice tuples in the permuted axis order; axes past
    # the end of the sub-area slices take the full extent (slice(None)).
    src_slice = tuple()
    dst_slice = tuple()
    for index in axes_order:
        if index >= len(sub_area.slice):
            src_slice += (slice(None),)
        else:
            src_slice += (sub_area.slice[index],)
        if index >= len(dest_sub_area.slice):
            dst_slice += (slice(None),)
        else:
            dst_slice += (dest_sub_area.slice[index],)
    if is_sequence and data.shape[0] == 1:
        # drop a degenerate (length-1) sequence axis, keeping calibrations and
        # slices consistent with the squeezed data.
        data = numpy.squeeze(data, axis=0)
        dimensional_calibrations = dimensional_calibrations[1:]
        src_slice = src_slice[1:]
        dst_slice = dst_slice[1:]
        is_sequence = False
    data_descriptor = DataAndMetadata.DataDescriptor(is_sequence, collection_dimension_count, datum_dimension_count)
    # rebuild the xdata with the permuted data/calibrations and new descriptor.
    data_and_metadata = DataAndMetadata.new_data_and_metadata(data, data_and_metadata.intensity_calibration, dimensional_calibrations, data_and_metadata.metadata, None, data_descriptor, None, None)
    # data_metadata describes the FULL destination shape (data_shape), while
    # data_and_metadata carries only this partial chunk.
    data_metadata = DataAndMetadata.DataMetadata((tuple(data_shape), data_and_metadata.data_dtype), data_and_metadata.intensity_calibration, data_and_metadata.dimensional_calibrations, metadata=data_and_metadata.metadata, data_descriptor=data_descriptor)
    self.__document_model.update_data_item_partial(self.__data_item, data_metadata, data_and_metadata, src_slice, dst_slice)