def __update_drift_log_data_item(self, delta_nm_data: _NDArray) -> None:
     """Write the latest drift deltas (nm) into the drift log data item.

     Must be called on the main thread. A cleared
     __drift_changed_event_listener indicates the logger has been closed,
     in which case nothing is written.
     """
     if not self.__drift_changed_event_listener:
         return
     self.__ensure_drift_log_data_item()
     assert self.__data_item
     offset_nm_xdata = DataAndMetadata.new_data_and_metadata(
         delta_nm_data,
         intensity_calibration=Calibration.Calibration(units="nm"))
     self.__data_item.set_data_and_metadata(offset_nm_xdata)
Example #2
0
 def execute(self, eels_spectrum_data_item, zlp_model, **kwargs) -> None:
     """Compute a zero-loss-peak model and ZLP-subtracted spectrum for an EELS spectrum.

     Looks up the registered "zlp-model" component whose id matches the
     zlp_model entity, fits it to the spectrum, and stores the resulting
     model and subtracted spectra on self.__model_xdata /
     self.__subtracted_xdata. Falls back to a zero model and the original
     spectrum when no component matches.
     """
     try:
         spectrum_xdata = eels_spectrum_data_item.xdata
         # only 1d spectra calibrated in eV are supported
         assert spectrum_xdata.is_datum_1d
         assert spectrum_xdata.datum_dimensional_calibrations[
             0].units == "eV"
         eels_spectrum_xdata = spectrum_xdata
         model_xdata = None
         subtracted_xdata = None
         if zlp_model._data_structure.entity:
             entity_id = zlp_model._data_structure.entity.entity_type.entity_id
             # find the registered component whose id matches this model's entity
             for component in Registry.get_components_by_type("zlp-model"):
                 # print(f"{entity_id=} {component.zero_loss_peak_model_id=}")
                 if entity_id == component.zero_loss_peak_model_id:
                     fit_result = component.fit_zero_loss_peak(
                         spectrum_xdata=spectrum_xdata)
                     model_xdata = fit_result["zero_loss_peak_model"]
                     # use 'or' to avoid doing subtraction if subtracted_spectrum already present
                     subtracted_xdata = fit_result.get(
                         "subtracted_spectrum",
                         None) or Core.calibrated_subtract_spectrum(
                             spectrum_xdata, model_xdata)
         if model_xdata is None:
             # no matching component: use an all-zeros model with matching calibrations
             model_xdata = DataAndMetadata.new_data_and_metadata(
                 numpy.zeros_like(eels_spectrum_xdata.data),
                 intensity_calibration=eels_spectrum_xdata.
                 intensity_calibration,
                 dimensional_calibrations=eels_spectrum_xdata.
                 dimensional_calibrations)
         if subtracted_xdata is None:
             # no subtraction available: fall back to the original spectrum
             subtracted_xdata = DataAndMetadata.new_data_and_metadata(
                 eels_spectrum_xdata.data,
                 intensity_calibration=eels_spectrum_xdata.
                 intensity_calibration,
                 dimensional_calibrations=eels_spectrum_xdata.
                 dimensional_calibrations)
         self.__model_xdata = model_xdata
         self.__subtracted_xdata = subtracted_xdata
     except Exception as e:
         # print the full traceback before re-raising so failures inside the
         # computation framework are visible on the console
         import traceback
         print(traceback.format_exc())
         print(e)
         raise
Example #3
0
 def test_display_data_is_2d_for_collection_of_1d_datum(self):
     """Display data for a 1d collection of 1d datum keeps its (2, 8) shape."""
     with TestContext.create_memory_context() as test_context:
         document_controller = test_context.create_document_controller()
         document_model = document_controller.document_model
         xdata = DataAndMetadata.new_data_and_metadata(
             numpy.ones((2, 8), numpy.float64),
             data_descriptor=DataAndMetadata.DataDescriptor(False, 1, 1))
         data_item = DataItem.new_data_item(xdata)
         document_model.append_data_item(data_item)
         display_item = document_model.get_display_item_for_data_item(data_item)
         display_data_channel = display_item.display_data_channels[0]
         display_values = display_data_channel.get_calculated_display_values(True)
         self.assertEqual(display_values.display_data_and_metadata.data_shape, (2, 8))
         display_values = display_data_channel.get_calculated_display_values(True)
         self.assertEqual(display_values.display_data_and_metadata.data_dtype, numpy.float64)
         self.assertEqual(display_data_channel.display_data_shape, (2, 8))
 def test_cursor_over_1d_image_without_exception_x(self):
     """Cursor position/value text over a 1d-collection shown as image should not raise."""
     with TestContext.create_memory_context() as test_context:
         document_model = test_context.create_document_model()
         xdata = DataAndMetadata.new_data_and_metadata(
             numpy.zeros((4, 25)),
             data_descriptor=DataAndMetadata.DataDescriptor(False, 1, 1))
         data_item = DataItem.new_data_item(xdata)
         document_model.append_data_item(data_item)
         display_item = document_model.get_display_item_for_data_item(data_item)
         display_item.display_type = "image"
         position_text, value_text = display_item.get_value_and_position_text(
             display_item.display_data_channel, (2, 20))
         self.assertEqual(position_text, "20.0, 2.0")
         self.assertEqual(value_text, "0")
Example #5
0
 def test_create_data_item_from_data_as_sequence(self):
     """Creating a data item from sequence xdata should mark it as a sequence."""
     with TestContext.create_memory_context() as test_context:
         document_controller = test_context.create_document_controller_with_application()
         document_model = document_controller.document_model
         api = Facade.get_api("~1.0", "~1.0")
         library = api.library
         sequence_descriptor = DataAndMetadata.DataDescriptor(True, 0, 2)
         xdata = DataAndMetadata.new_data_and_metadata(
             numpy.zeros((8, 4, 5)), data_descriptor=sequence_descriptor)
         data_item = library.create_data_item_from_data_and_metadata(xdata, "three")
         self.assertEqual(library.data_item_count, 1)
         self.assertTrue(document_model.data_items[0].is_sequence)
Example #6
0
 def test_image_j_produces_proper_data_types(self):
     """Round-trip through the ImageJ TIFF delegate preserves shape and maps dtypes."""
     io_delegate = TIFF_IO.TIFFIODelegate_ImageJ(API())
     dtypes = (numpy.int16, numpy.uint16, numpy.int32, numpy.uint32,
               numpy.int64, numpy.uint64, numpy.float32, numpy.float64)
     for dtype in dtypes:
         stream = io.BytesIO()
         xdata_w = DataAndMetadata.new_data_and_metadata(numpy.zeros((16, 16), dtype=dtype))
         io_delegate.write_data_and_metadata_stream(xdata_w, stream)
         stream.seek(0)
         xdata_r = io_delegate.read_data_and_metadata_from_stream(stream)
         self.assertEqual(xdata_w.data_shape, xdata_r.data_shape)
         # the delegate narrows all types to one of these two on read
         self.assertIn(xdata_r.data.dtype, (numpy.uint16, numpy.float32))
Example #7
0
 def test_create_data_item_from_data_as_sequence(self):
     """Creating a data item from sequence xdata should mark it as a sequence."""
     document_model = DocumentModel.DocumentModel()
     document_controller = self.app.create_document_controller(document_model, "library")
     with contextlib.closing(document_controller):
         api = Facade.get_api("~1.0", "~1.0")
         library = api.library
         sequence_descriptor = DataAndMetadata.DataDescriptor(True, 0, 2)
         xdata = DataAndMetadata.new_data_and_metadata(numpy.zeros((8, 4, 5)), data_descriptor=sequence_descriptor)
         data_item = library.create_data_item_from_data_and_metadata(xdata, "three")
         self.assertEqual(library.data_item_count, 1)
         self.assertTrue(document_model.data_items[0].is_sequence)
Example #8
0
    def test_reference_images_load_properly(self):
        """Load reference dm3/dm4 files and verify calibrations, descriptor, and data.

        Covers each data-descriptor shape (spectrum, collections, sequences,
        images) for file versions 3 and 4; the commented block below shows how
        the reference files were generated.
        """
        shape_data_descriptors = (
            ((3, ), DataAndMetadata.DataDescriptor(False, 0, 1)),  # spectrum
            ((3, 2), DataAndMetadata.DataDescriptor(False, 1, 1)),  # 1d collection of spectra
            ((3, 4, 5), DataAndMetadata.DataDescriptor(False, 2, 1)),  # 2d collection of spectra
            ((3, 2), DataAndMetadata.DataDescriptor(True, 0, 1)),  # sequence of spectra
            ((3, 2), DataAndMetadata.DataDescriptor(False, 0, 2)),  # image
            ((4, 3, 2), DataAndMetadata.DataDescriptor(False, 1, 2)),  # 1d collection of images
            ((3, 4, 5), DataAndMetadata.DataDescriptor(True, 0, 2)),  # sequence of images
        )
        for (shape, data_descriptor), version in itertools.product(
                shape_data_descriptors, (3, 4)):
            # synthesize the calibrations the reference files were written with
            dimensional_calibrations = list()
            for index, dimension in enumerate(shape):
                dimensional_calibrations.append(
                    Calibration.Calibration(1.0 + 0.1 * index,
                                            2.0 + 0.2 * index,
                                            "µ" + "n" * index))
            intensity_calibration = Calibration.Calibration(4, 5, "six")
            # numpy.product was removed in NumPy 2.0; numpy.prod is the
            # long-standing equivalent
            data = numpy.arange(numpy.prod(shape),
                                dtype=numpy.float32).reshape(shape)

            name = f"ref_{'T' if data_descriptor.is_sequence else 'F'}_{data_descriptor.collection_dimension_count}_{data_descriptor.datum_dimension_count}.dm{version}"

            # import pathlib
            # xdata = DataAndMetadata.new_data_and_metadata(data, dimensional_calibrations=dimensional_calibrations, intensity_calibration=intensity_calibration, data_descriptor=data_descriptor)
            # file_path = pathlib.Path(__file__).parent / "resources" / name
            # with file_path.open('wb') as f:
            #     dm3_image_utils.save_image(xdata, f, version)

            try:
                s = io.BytesIO(pkgutil.get_data(__name__, f"resources/{name}"))
                xdata = dm3_image_utils.load_image(s)
                self.assertAlmostEqual(intensity_calibration.scale,
                                       xdata.intensity_calibration.scale, 6)
                self.assertAlmostEqual(intensity_calibration.offset,
                                       xdata.intensity_calibration.offset, 6)
                self.assertEqual(intensity_calibration.units,
                                 xdata.intensity_calibration.units)
                for c1, c2 in zip(dimensional_calibrations,
                                  xdata.dimensional_calibrations):
                    self.assertAlmostEqual(c1.scale, c2.scale, 6)
                    self.assertAlmostEqual(c1.offset, c2.offset, 6)
                    self.assertEqual(c1.units, c2.units)
                self.assertEqual(data_descriptor, xdata.data_descriptor)
                self.assertTrue(numpy.array_equal(data, xdata.data))
                # print(f"{name} {data_descriptor} PASS")
            except Exception:
                # identify which reference file failed before re-raising
                print(f"{name} {data_descriptor} FAIL")
                raise
Example #9
0
def row(shape: DataAndMetadata.Shape2dType,
        start: typing.Optional[int] = None,
        stop: typing.Optional[int] = None) -> DataAndMetadata.DataAndMetadata:
    """Return xdata whose values vary linearly along the row (vertical) axis.

    Values run from start to stop along each axis' extent; defaults are
    0..shape[0] (rows) and 0..shape[1] (columns). Annotations made explicitly
    Optional per PEP 484 (implicit Optional is rejected by modern checkers).
    """
    start_0 = start if start is not None else 0
    stop_0 = stop if stop is not None else shape[0]
    start_1 = start if start is not None else 0
    stop_1 = stop if stop is not None else shape[1]
    # meshgrid()[1] is the row-coordinate array
    data = numpy.meshgrid(numpy.linspace(start_1, stop_1, shape[1]),
                          numpy.linspace(start_0, stop_0, shape[0]))[1]
    return DataAndMetadata.new_data_and_metadata(data)
 def test_data_element_to_extended_data_conversion(self):
     """Round-trip xdata -> data element -> xdata preserves content via copies."""
     data = numpy.ones((8, 6), int)
     intensity_calibration = Calibration.Calibration(offset=1, scale=1.1, units="one")
     dimensional_calibrations = [
         Calibration.Calibration(offset=2, scale=2.1, units="two"),
         Calibration.Calibration(offset=3, scale=2.2, units="two"),
     ]
     metadata = {"hardware_source": {"one": 1, "two": "b"}}
     timestamp = datetime.datetime.now()
     data_descriptor = DataAndMetadata.DataDescriptor(is_sequence=False, collection_dimension_count=1, datum_dimension_count=1)
     xdata = DataAndMetadata.new_data_and_metadata(
         data,
         intensity_calibration=intensity_calibration,
         dimensional_calibrations=dimensional_calibrations,
         metadata=metadata,
         timestamp=timestamp,
         data_descriptor=data_descriptor)
     data_element = ImportExportManager.create_data_element_from_extended_data(xdata)
     new_xdata = ImportExportManager.convert_data_element_to_data_and_metadata(data_element)
     self.assertTrue(numpy.array_equal(data, new_xdata.data))
     # equal values but distinct objects: conversion must copy, not share
     self.assertNotEqual(id(new_xdata.intensity_calibration), id(intensity_calibration))
     self.assertEqual(new_xdata.intensity_calibration, intensity_calibration)
     self.assertNotEqual(id(new_xdata.dimensional_calibrations[0]), id(dimensional_calibrations[0]))
     self.assertEqual(new_xdata.dimensional_calibrations, dimensional_calibrations)
     self.assertNotEqual(id(new_xdata.metadata), id(metadata))
     self.assertEqual(new_xdata.metadata, metadata)
     self.assertNotEqual(id(new_xdata.data_descriptor), id(data_descriptor))
     self.assertEqual(new_xdata.data_descriptor, data_descriptor)
Example #11
0
 def start(self) -> typing.Optional[DataAndMetadata.DataAndMetadata]:
     """Allocate and return the full scan readout xdata (including flyback pixels)."""
     binning = self.__camera_frame_parameters.get("binning", 1)
     camera_readout_shape = self.__camera_device.get_expected_dimensions(binning)
     processing = self.__camera_frame_parameters.get("processing")
     if processing == "sum_project":
         # projection sums away the first readout dimension
         camera_readout_shape = camera_readout_shape[1:]
     elif processing == "sum_masked":
         mask_array = self.__camera_device.mask_array
         # one value per mask; a single value when no masks are configured
         camera_readout_shape = (len(mask_array) if mask_array is not None else 1, )
     self.__data = numpy.zeros(self.__scan_shape + camera_readout_shape,
                               numpy.float32)
     data_descriptor = DataAndMetadata.DataDescriptor(
         False, len(self.__scan_shape), len(camera_readout_shape))
     self.__xdata = DataAndMetadata.new_data_and_metadata(
         self.__data, data_descriptor=data_descriptor)
     return self.__xdata
Example #12
0
 def get_scan_data(self, frame_parameters, channel) -> numpy.ndarray:
     """Generate simulated scan data for the given frame parameters.

     Plots sample features into an oversized buffer (so rotation never reads
     outside the scanned area), applies subscan offset/rotation, crops back to
     the requested size, then adds noise and scales by pixel time.
     NOTE(review): channel is unused in this body — presumably handled by the
     caller; confirm.
     """
     # subscan pixel size wins over the full frame size when present
     size = Geometry.IntSize.make(
         frame_parameters.subscan_pixel_size if frame_parameters.
         subscan_pixel_size else frame_parameters.size)
     offset_m = self.actual_offset_m  # stage position - beam shift + drift
     fov_size_nm = Geometry.FloatSize.make(
         frame_parameters.fov_size_nm
     ) if frame_parameters.fov_size_nm else Geometry.FloatSize(
         frame_parameters.fov_nm, frame_parameters.fov_nm)
     if frame_parameters.subscan_fractional_size:
         # scale the field of view down to the subscan fraction
         subscan_fractional_size = Geometry.FloatSize.make(
             frame_parameters.subscan_fractional_size)
         used_fov_size_nm = Geometry.FloatSize(
             height=fov_size_nm.height * subscan_fractional_size.height,
             width=fov_size_nm.width * subscan_fractional_size.width)
     else:
         used_fov_size_nm = fov_size_nm
     center_nm = Geometry.FloatPoint.make(frame_parameters.center_nm)
     if frame_parameters.subscan_fractional_center:
         # shift the scan center to the subscan center, rotated into frame space
         subscan_fractional_center = Geometry.FloatPoint.make(
             frame_parameters.subscan_fractional_center
         ) - Geometry.FloatPoint(y=0.5, x=0.5)
         fc = subscan_fractional_center.rotate(
             frame_parameters.rotation_rad)
         center_nm += Geometry.FloatPoint(y=fc.y * fov_size_nm.height,
                                          x=fc.x * fov_size_nm.width)
     # sqrt(2) margin so any rotation of the target rect stays inside the buffer
     extra = int(
         math.ceil(
             max(size.height * math.sqrt(2) - size.height,
                 size.width * math.sqrt(2) - size.width)))
     extra_nm = Geometry.FloatPoint(
         y=(extra / size.height) * used_fov_size_nm[0],
         x=(extra / size.width) * used_fov_size_nm[1])
     used_size = size + Geometry.IntSize(height=extra, width=extra)
     data = numpy.zeros((used_size.height, used_size.width), numpy.float32)
     self.sample.plot_features(data, offset_m, used_fov_size_nm, extra_nm,
                               center_nm, used_size)
     noise_factor = 0.3
     total_rotation = frame_parameters.rotation_rad
     if frame_parameters.subscan_rotation:
         total_rotation -= frame_parameters.subscan_rotation
     if total_rotation != 0:
         # crop the rotated interior rectangle out of the oversized buffer
         inner_height = size.height / used_size.height
         inner_width = size.width / used_size.width
         inner_bounds = ((1.0 - inner_height) * 0.5,
                         (1.0 - inner_width) * 0.5), (inner_height,
                                                      inner_width)
         data = Core.function_crop_rotated(
             DataAndMetadata.new_data_and_metadata(data), inner_bounds,
             -total_rotation).data
     else:
         # no rotation: crop the centered interior directly
         data = data[extra // 2:extra // 2 + size.height,
                     extra // 2:extra // 2 + size.width]
     return (data + numpy.random.randn(size.height, size.width) *
             noise_factor) * frame_parameters.pixel_time_us
Example #13
0
 def test_display_data_is_2d_for_2d_sequence(self):
     """Display data for a sequence of 2d images should be a single 2d frame."""
     with TestContext.create_memory_context() as test_context:
         document_controller = test_context.create_document_controller()
         document_model = document_controller.document_model
         data_and_metadata = DataAndMetadata.new_data_and_metadata(
             numpy.ones((4, 16, 16), numpy.float64),
             data_descriptor=DataAndMetadata.DataDescriptor(True, 0, 2))
         # numpy.float was removed in NumPy 1.24; the builtin float is equivalent
         data_item = DataItem.DataItem(numpy.ones((8, ), float))
         document_model.append_data_item(data_item)
         display_item = document_model.get_display_item_for_data_item(
             data_item)
         display_item.data_item.set_xdata(data_and_metadata)
         display_data_channel = display_item.display_data_channels[0]
         self.assertEqual(
             display_data_channel.get_calculated_display_values(
                 True).display_data_and_metadata.data_shape, (16, 16))
         self.assertEqual(
             display_data_channel.get_calculated_display_values(
                 True).display_data_and_metadata.data_dtype, numpy.float64)
         self.assertEqual(display_data_channel.display_data_shape, (16, 16))
Example #14
0
def radius(shape: DataAndMetadata.Shape2dType,
           normalize: bool = True) -> DataAndMetadata.DataAndMetadata:
    """Return xdata whose values are the distance from the array center.

    When normalize is true the edges lie at radius 1; otherwise radii are
    measured in pixels.
    """
    half_height = shape[0] * 0.5
    half_width = shape[1] * 0.5
    start_0, stop_0 = (-1, 1) if normalize else (-half_height, half_height)
    start_1, stop_1 = (-1, 1) if normalize else (-half_width, half_width)
    # sparse meshgrid keeps memory low; broadcasting restores the full grid
    icol, irow = numpy.meshgrid(numpy.linspace(start_1, stop_1, shape[1]),
                                numpy.linspace(start_0, stop_0, shape[0]),
                                sparse=True)
    return DataAndMetadata.new_data_and_metadata(numpy.sqrt(icol * icol + irow * irow))
 def test_cursor_over_1d_sequence_data_displays_without_exception(self):
     """Cursor position/value text over a sequence of 1d data should not raise."""
     with TestContext.create_memory_context() as test_context:
         document_model = test_context.create_document_model()
         xdata = DataAndMetadata.new_data_and_metadata(
             numpy.zeros((4, 1000), numpy.float64),
             data_descriptor=DataAndMetadata.DataDescriptor(True, 0, 1))
         data_item = DataItem.new_data_item(xdata)
         document_model.append_data_item(data_item)
         display_item = document_model.get_display_item_for_data_item(data_item)
         display_item.calibration_style_id = "pixels-top-left"
         position_text, value_text = display_item.get_value_and_position_text(
             display_item.display_data_channel, (500,))
         self.assertEqual(position_text, "500.0, 0.0")
         self.assertEqual(value_text, "0")
Example #16
0
 def test_cursor_over_1d_multiple_data_but_2_datum_dimensions_displays_without_exception(self):
     """Cursor text over 2d-datum data indexed with a 1d position should not raise."""
     document_model = DocumentModel.DocumentModel()
     with contextlib.closing(document_model):
         xdata = DataAndMetadata.new_data_and_metadata(
             numpy.zeros((4, 1000), numpy.float64),
             data_descriptor=DataAndMetadata.DataDescriptor(False, 0, 2))
         data_item = DataItem.new_data_item(xdata)
         document_model.append_data_item(data_item)
         display_item = document_model.get_display_item_for_data_item(data_item)
         display_item.calibration_style_id = "pixels-top-left"
         position_text, value_text = display_item.get_value_and_position_text(
             display_item.display_data_channel, (500,))
         self.assertEqual(position_text, "500.0, 0.0")
         self.assertEqual(value_text, "0")
 def test_time_zone_in_extended_data_to_data_element_to_data_item_conversion(self):
     """Time-zone metadata should survive xdata -> data element -> data item."""
     # test the whole path, redundant?
     data = numpy.ones((8, 6), int)
     metadata = {
         "description": {"time_zone": {"tz": "+0300", "dst": "+60"}},
         "hardware_source": {"one": 1, "two": "b"},
     }
     timestamp = datetime.datetime(2013, 11, 18, 14, 5, 4, 1)
     xdata = DataAndMetadata.new_data_and_metadata(data, metadata=metadata, timestamp=timestamp)
     data_element = ImportExportManager.create_data_element_from_extended_data(xdata)
     with contextlib.closing(ImportExportManager.create_data_item_from_data_element(data_element)) as data_item:
         time_zone = data_item.metadata["description"]["time_zone"]
         self.assertEqual(time_zone["tz"], "+0300")
         self.assertEqual(time_zone["dst"], "+60")
         self.assertEqual("2013-11-18 14:05:04.000001", str(data_item.created))
Example #18
0
def map_thickness_xdata(src_xdata: DataAndMetadata.DataAndMetadata, progress_fn=None) -> typing.Optional[DataAndMetadata.DataAndMetadata]:
    """Map a spectrum image to a per-pixel log-ratio (thickness-style) image.

    For each pixel, computes log(total counts / s) from the last (spectral)
    axis, where s is the third value returned by sum_zlp — presumably the
    zero-loss-peak sum; confirm against sum_zlp. progress_fn, when callable,
    is called with the row index every tenth row.
    """
    height, width = src_xdata.data_shape[0], src_xdata.data_shape[1]
    data = numpy.empty((height, width), numpy.float32)
    for row in range(height):
        if row > 0 and row % 10 == 0 and callable(progress_fn):
            progress_fn(row)
        for column in range(width):
            spectrum = src_xdata.data[row, column, :]
            _, _, s = sum_zlp(spectrum)
            data[row, column] = numpy.log(numpy.sum(spectrum) / s)
    # drop the spectral calibration; the result is a 2d map
    return DataAndMetadata.new_data_and_metadata(data, dimensional_calibrations=src_xdata.dimensional_calibrations[0:-1])
Example #19
0
 def test_display_data_is_2d_for_collection_of_1d_datum(self):
     """Display data for a 1d collection of 1d datum keeps its (2, 8) shape."""
     document_model = DocumentModel.DocumentModel()
     document_controller = DocumentController.DocumentController(
         self.app.ui, document_model, workspace_id="library")
     with contextlib.closing(document_controller):
         xdata = DataAndMetadata.new_data_and_metadata(
             numpy.ones((2, 8), numpy.float64),
             data_descriptor=DataAndMetadata.DataDescriptor(False, 1, 1))
         data_item = DataItem.new_data_item(xdata)
         document_model.append_data_item(data_item)
         display_item = document_model.get_display_item_for_data_item(data_item)
         display_data_channel = display_item.display_data_channels[0]
         display_values = display_data_channel.get_calculated_display_values(True)
         self.assertEqual(display_values.display_data_and_metadata.data_shape, (2, 8))
         display_values = display_data_channel.get_calculated_display_values(True)
         self.assertEqual(display_values.display_data_and_metadata.data_dtype, numpy.float64)
         self.assertEqual(display_data_channel.display_data_shape, (2, 8))
Example #20
0
 def test_create_rgba_sequence_should_work(self):
     """A sequence of RGB frames (uint8, trailing axis 3) should display as uint8 RGB."""
     with TestContext.create_memory_context() as test_context:
         document_controller = test_context.create_document_controller()
         document_model = document_controller.document_model
         rgb_data = (numpy.random.rand(4, 64, 64, 3) * 255).astype(numpy.uint8)
         sequence_descriptor = DataAndMetadata.DataDescriptor(True, 0, 2)
         data_item = DataItem.new_data_item(
             DataAndMetadata.new_data_and_metadata(rgb_data, data_descriptor=sequence_descriptor))
         document_model.append_data_item(data_item)
         display_item = document_model.get_display_item_for_data_item(data_item)
         display_data_channel = display_item.display_data_channels[0]
         display_values = display_data_channel.get_calculated_display_values(True)
         self.assertEqual(display_values.display_data_and_metadata.data_dtype, numpy.uint8)
         display_values = display_data_channel.get_calculated_display_values(True)
         self.assertEqual(display_values.display_data_and_metadata.data_shape[-1], 3)
Example #21
0
def row(shape: DataAndMetadata.Shape2dType,
        start: typing.Optional[int] = None,
        stop: typing.Optional[int] = None) -> DataAndMetadata.DataAndMetadata:
    """Return xdata whose values vary linearly along the row (vertical) axis.

    Values run from start to stop; defaults are 0..shape[0] (rows) and
    0..shape[1] (columns).
    """
    row_start = 0 if start is None else start
    row_stop = shape[0] if stop is None else stop
    col_start = 0 if start is None else start
    col_stop = shape[1] if stop is None else stop
    coords = numpy.meshgrid(numpy.linspace(col_start, col_stop, shape[1]),
                            numpy.linspace(row_start, row_stop, shape[0]))
    # meshgrid's second result is the row-coordinate array
    data: _ImageDataType = coords[1]  # type: ignore
    return DataAndMetadata.new_data_and_metadata(data)
Example #22
0
 def align_stack(
     stack: _NDArray,
     task_object: typing.Optional[Task.TaskContextManager] = None
 ) -> typing.Tuple[_NDArray, _NDArray]:
     """Register a 3d image stack frame-to-frame and return (sum_image, shifts).

     Shifts are measured by cross-correlating each frame against the previous
     one and accumulating, so shifts[i] is relative to frame 0. The summed
     image is built by shifting each frame by its accumulated shift.
     task_object, when provided, receives progress updates.
     """
     # Calculate cross-correlation of the image stack
     number_frames = stack.shape[0]
     if task_object is not None:
         task_object.update_progress(_("Starting image alignment."),
                                     (0, number_frames))
     # Pre-allocate an array for the shifts we'll measure
     shifts = numpy.zeros((number_frames, 2))
     # initial reference slice is first slice
     ref = stack[0][:]
     ref_shift: numpy.typing.NDArray[typing.Any] = numpy.array([0, 0])
     for index, _slice in enumerate(stack):
         if task_object is not None:
             task_object.update_progress(
                 _("Cross correlating frame {}.").format(index),
                 (index + 1, number_frames), None)
         # TODO: make interpolation factor variable
         # (it is hard-coded to 100 here.)
         ref_xdata = DataAndMetadata.new_data_and_metadata(ref)
         _slice_xdata = DataAndMetadata.new_data_and_metadata(_slice)
         # Calculate image shifts
         # each measurement is relative to the previous frame, so accumulate
         # onto the previous frame's absolute shift
         shifts[index] = ref_shift + numpy.array(
             xd.register_translation(ref_xdata, _slice_xdata, 100))
         ref = _slice[:]
         ref_shift = shifts[index]
     # sum image needs to be big enough for shifted images
     sum_image = numpy.zeros(ref.shape)
     # add the images to the registered stack
     for index, _slice in enumerate(stack):
         if task_object is not None:
             task_object.update_progress(
                 _("Summing frame {}.").format(index),
                 (index + 1, number_frames), None)
         _slice_xdata = DataAndMetadata.new_data_and_metadata(_slice)
         shifted_slice_data = xd.shift(_slice_xdata, shifts[index])
         assert shifted_slice_data
         sum_image += shifted_slice_data.data
     return sum_image, shifts
Example #23
0
def function_rgb(
    red_data_and_metadata_in: _DataAndMetadataIndeterminateSizeLike,
    green_data_and_metadata_in: _DataAndMetadataIndeterminateSizeLike,
    blue_data_and_metadata_in: _DataAndMetadataIndeterminateSizeLike
) -> DataAndMetadata.DataAndMetadata:
    """Combine three channel arrays into a single uint8 RGB image.

    Each input may be an array or a constant; constants are broadcast to the
    common shape. Integer channels are clipped to 0..255; float channels are
    scaled by 255 then clipped. Raises ValueError when shapes are
    indeterminate or data is invalid. The result takes its calibrations from
    the red channel.
    """
    red_data_and_metadata_c = DataAndMetadata.promote_indeterminate_array(
        red_data_and_metadata_in)
    green_data_and_metadata_c = DataAndMetadata.promote_indeterminate_array(
        green_data_and_metadata_in)
    blue_data_and_metadata_c = DataAndMetadata.promote_indeterminate_array(
        blue_data_and_metadata_in)

    shape = DataAndMetadata.determine_shape(red_data_and_metadata_c,
                                            green_data_and_metadata_c,
                                            blue_data_and_metadata_c)

    if shape is None:
        raise ValueError("RGB: data shapes do not match or are indeterminate")

    red_data_and_metadata = DataAndMetadata.promote_constant(
        red_data_and_metadata_c, shape)
    green_data_and_metadata = DataAndMetadata.promote_constant(
        green_data_and_metadata_c, shape)
    blue_data_and_metadata = DataAndMetadata.promote_constant(
        blue_data_and_metadata_c, shape)

    # ordered b, g, r to match the channel-index layout used below
    channels = (blue_data_and_metadata, green_data_and_metadata,
                red_data_and_metadata)

    # generator expression: any() short-circuits without building a list
    if any(not Image.is_data_valid(data_and_metadata.data)
           for data_and_metadata in channels):
        raise ValueError("RGB: invalid data")

    rgb_image = numpy.empty(shape + (3, ), numpy.uint8)
    for channel_index, channel in enumerate(channels):
        data = channel._data_ex
        if data.dtype.kind in 'iu':
            rgb_image[..., channel_index] = numpy.clip(data, 0, 255)
        elif data.dtype.kind in 'f':
            rgb_image[...,
                      channel_index] = numpy.clip(numpy.multiply(data, 255), 0,
                                                  255)

    return DataAndMetadata.new_data_and_metadata(
        rgb_image,
        intensity_calibration=red_data_and_metadata.intensity_calibration,
        dimensional_calibrations=red_data_and_metadata.dimensional_calibrations
    )
Example #24
0
 def test_metadata_export_large_integer(self):
     """Metadata integers larger than 32 bits must survive a dm3 round trip."""
     stream = io.BytesIO()
     data_in = numpy.ones((6, 4), numpy.float32)
     descriptor_in = DataAndMetadata.DataDescriptor(False, 0, 2)
     dimensional_calibrations_in = [
         Calibration.Calibration(1, 2, "nm"),
         Calibration.Calibration(2, 3, u"µm"),
     ]
     intensity_calibration_in = Calibration.Calibration(4, 5, "six")
     metadata_in = {"abc": 999999999999}
     xdata_in = DataAndMetadata.new_data_and_metadata(
         data_in,
         data_descriptor=descriptor_in,
         dimensional_calibrations=dimensional_calibrations_in,
         intensity_calibration=intensity_calibration_in,
         metadata=metadata_in)
     dm3_image_utils.save_image(xdata_in, stream)
     stream.seek(0)
     xdata_out = dm3_image_utils.load_image(stream)
     self.assertEqual({"abc": 999999999999}, xdata_out.metadata)
Example #25
0
 def __create_spectrum_image_xdata(self, dtype=numpy.float32):
     """Build a synthetic 8x8 spectrum image: 1024 channels of uniform noise per pixel."""
     data = numpy.zeros((8, 8, 1024), dtype)
     for row in range(data.shape[0]):
         for column in range(data.shape[1]):
             data[row, column, :] = numpy.random.uniform(10, 1000, 1024)
     # 2d collection (nm x nm) of 1d spectra calibrated in eV
     return DataAndMetadata.new_data_and_metadata(
         data,
         intensity_calibration=Calibration.Calibration(units="~"),
         dimensional_calibrations=[
             Calibration.Calibration(units="nm"),
             Calibration.Calibration(units="nm"),
             Calibration.Calibration(scale=2.0, units="eV"),
         ],
         data_descriptor=DataAndMetadata.DataDescriptor(
             is_sequence=False,
             collection_dimension_count=2,
             datum_dimension_count=1))
Example #26
0
def new_with_data(data: numpy.ndarray, *,
                  intensity_calibration: Calibration.Calibration = None,
                  dimensional_calibrations: DataAndMetadata.CalibrationListType = None,
                  metadata: dict = None,
                  timestamp: datetime.datetime = None,
                  data_descriptor: DataAndMetadata.DataDescriptor = None) -> DataAndMetadata.DataAndMetadata:
    """Construct extended data from an ndarray plus optional calibrations/metadata.

    Thin convenience wrapper that forwards all keyword arguments unchanged to
    DataAndMetadata.new_data_and_metadata.
    """
    keyword_arguments = dict(
        intensity_calibration=intensity_calibration,
        dimensional_calibrations=dimensional_calibrations,
        metadata=metadata,
        timestamp=timestamp,
        data_descriptor=data_descriptor)
    return DataAndMetadata.new_data_and_metadata(data, **keyword_arguments)
Example #27
0
 def test_display_data_is_2d_for_2d_sequence(self):
     """Display data for a sequence of 2d frames should be a single 2d frame (16x16)."""
     document_model = DocumentModel.DocumentModel()
     document_controller = DocumentController.DocumentController(
         self.app.ui, document_model, workspace_id="library")
     with contextlib.closing(document_controller):
         # sequence of four 16x16 frames (is_sequence=True, 0 collection dims, 2 datum dims)
         data_and_metadata = DataAndMetadata.new_data_and_metadata(
             numpy.ones((4, 16, 16), numpy.float64),
             data_descriptor=DataAndMetadata.DataDescriptor(True, 0, 2))
         # numpy.float was removed in numpy 1.24; the builtin float is the same dtype (float64)
         data_item = DataItem.DataItem(numpy.ones((8, ), float))
         document_model.append_data_item(data_item)
         display_item = document_model.get_display_item_for_data_item(
             data_item)
         display_item.data_item.set_xdata(data_and_metadata)
         display_data_channel = display_item.display_data_channels[0]
         # the displayed frame collapses the sequence axis to a single 16x16 float64 image
         self.assertEqual(
             display_data_channel.get_calculated_display_values(
                 True).display_data_and_metadata.data_shape, (16, 16))
         self.assertEqual(
             display_data_channel.get_calculated_display_values(
                 True).display_data_and_metadata.data_dtype, numpy.float64)
         self.assertEqual(display_data_channel.display_data_shape, (16, 16))
Example #28
0
 def prepare_section(self) -> scan_base.SynchronizedScanBehaviorAdjustments:
     """Measure drift before a synchronized-scan section and return center adjustments.

     Records the configured drift sub-region, registers it against the
     previously recorded reference image, converts the measured pixel offset
     to nm in the context reference frame, accumulates the correction into
     self.__center_nm, and returns the per-section adjustment for the caller
     to apply. On the first call (no reference yet) it only stores the
     reference image and returns empty adjustments.
     """
     # this method must be thread safe
     # start with the context frame parameters and adjust for the drift region
     adjustments = scan_base.SynchronizedScanBehaviorAdjustments()
     frame_parameters = copy.deepcopy(self.__scan_frame_parameters)
     context_size = Geometry.FloatSize.make(frame_parameters.size)
     drift_channel_id = self.__scan_hardware_source.drift_channel_id
     drift_region = Geometry.FloatRect.make(self.__scan_hardware_source.drift_region)
     drift_rotation = self.__scan_hardware_source.drift_rotation
     if drift_channel_id is not None and drift_region is not None:
         drift_channel_index = self.__scan_hardware_source.get_channel_index(drift_channel_id)
         # oversample the drift region 4x relative to the context scan resolution
         frame_parameters.subscan_pixel_size = int(context_size.height * drift_region.height * 4), int(context_size.width * drift_region.width * 4)
         # only proceed when the drift sub-scan is large enough to register reliably
         if frame_parameters.subscan_pixel_size[0] >= 8 or frame_parameters.subscan_pixel_size[1] >= 8:
             frame_parameters.subscan_fractional_size = drift_region.height, drift_region.width
             frame_parameters.subscan_fractional_center = drift_region.center.y, drift_region.center.x
             frame_parameters.subscan_rotation = drift_rotation
             # attempt to keep drift area in roughly the same position by adding in the accumulated correction.
             frame_parameters.center_nm = tuple(Geometry.FloatPoint.make(frame_parameters.center_nm) + self.__center_nm)
             xdatas = self.__scan_hardware_source.record_immediate(frame_parameters, [drift_channel_index])
             # new_offset = self.__scan_hardware_source.stem_controller.drift_offset_m
             if self.__last_xdata:
                 # calculate offset. if data shifts down/right, offset will be negative (register_translation convention).
                 # offset = Geometry.FloatPoint.make(xd.register_translation(self.__last_xdata, xdatas[0], upsample_factor=10))
                 quality, offset = xd.register_template(self.__last_xdata, xdatas[0])
                 offset = Geometry.FloatPoint.make(offset)
                 # convert the pixel offset to calibrated (nm) units per axis
                 offset_nm = Geometry.FloatSize(
                     h=xdatas[0].dimensional_calibrations[0].convert_to_calibrated_size(offset.y),
                     w=xdatas[0].dimensional_calibrations[1].convert_to_calibrated_size(offset.x))
                 # calculate adjustment (center_nm). if center_nm positive, data shifts up/left.
                 # rotate back into context reference frame
                 offset_nm = offset_nm.rotate(-drift_rotation)
                 offset_nm -= self.__center_nm  # adjust for center_nm adjustment above
                 delta_nm = offset_nm - self.__last_offset_nm
                 self.__last_offset_nm = offset_nm
                 # log height, width, and euclidean magnitude of the accumulated offset
                 offset_nm_xy = math.sqrt(pow(offset_nm.height, 2) + pow(offset_nm.width, 2))
                 self.__offset_nm_data = numpy.hstack([self.__offset_nm_data, numpy.array([offset_nm.height, offset_nm.width, offset_nm_xy]).reshape(3, 1)])
                 offset_nm_xdata = DataAndMetadata.new_data_and_metadata(self.__offset_nm_data, intensity_calibration=Calibration.Calibration(units="nm"))
                 # data item updates must happen on the main thread; marshal via _call_soon
                 def update_data_item(offset_nm_xdata: DataAndMetadata.DataAndMetadata) -> None:
                     self.__data_item.set_data_and_metadata(offset_nm_xdata)
                 self.__scan_hardware_source._call_soon(functools.partial(update_data_item, offset_nm_xdata))
                 # report the difference from the last time we reported, but negative since center_nm positive shifts up/left
                 adjustments.offset_nm = -delta_nm
                 self.__center_nm -= delta_nm
                 # print(f"{self.__last_offset_nm} {adjustments.offset_nm} [{(new_offset - self.__last_offset) * 1E9}] {offset} {frame_parameters.fov_nm}")
                 if False:  # if offset_nm > drift area / 10?
                     # retake to provide reference at new offset
                     frame_parameters.center_nm = tuple(Geometry.FloatPoint.make(frame_parameters.center_nm) + adjustments.offset_nm)
                     self.__scan_frame_parameters.center_nm = frame_parameters.center_nm
                     xdatas = self.__scan_hardware_source.record_immediate(frame_parameters, [drift_channel_index])
                     new_offset = self.__scan_hardware_source.stem_controller.drift_offset_m
             else:
                 # first pass: establish the reference image for subsequent registrations
                 self.__last_xdata = xdatas[0]
     return adjustments
Example #29
0
 def __fit_zero_loss_peak(self, spectrum_xdata: DataAndMetadata.DataAndMetadata) -> DataAndMetadata.DataAndMetadata:
     """Fit a zero-loss-peak model to a spectrum, or to each spectrum of a navigable set.

     spectrum_xdata: 1d spectrum data, possibly with leading navigation
     dimensions; its datum calibration locates the 0 eV channel used by the fit.
     Returns model data carrying the input's calibrations.
     """
     reference_frame = Calibration.ReferenceFrameAxis(spectrum_xdata.datum_dimensional_calibrations[0], spectrum_xdata.datum_dimension_shape[0])
     # pixel index of the calibrated zero (0 eV) position along the datum axis
     z = reference_frame.convert_to_pixel(Calibration.Coordinate(Calibration.CoordinateType.CALIBRATED, 0)).int_value
     calibration = copy.deepcopy(spectrum_xdata.datum_dimensional_calibrations[0])
     ys = spectrum_xdata.data
     if spectrum_xdata.is_navigable:
         calibrations = list(copy.deepcopy(spectrum_xdata.navigation_dimensional_calibrations)) + [calibration]
         # flatten navigation dimensions so the fitter sees (n_spectra, n_channels).
         # numpy.product was deprecated and removed in numpy 2.0; use numpy.prod.
         yss = numpy.reshape(ys, (numpy.prod(ys.shape[:-1]),) + (ys.shape[-1],))
         fit_data = self._perform_fits(yss, z)
         data_descriptor = DataAndMetadata.DataDescriptor(False, spectrum_xdata.navigation_dimension_count,
                                                          spectrum_xdata.datum_dimension_count)
         model_xdata = DataAndMetadata.new_data_and_metadata(
             numpy.reshape(fit_data, ys.shape[:-1] + (ys.shape[-1],)),
             data_descriptor=data_descriptor,
             dimensional_calibrations=calibrations,
             intensity_calibration=spectrum_xdata.intensity_calibration)
     else:
         poly_data = self._perform_fit(ys, z)
         model_xdata = DataAndMetadata.new_data_and_metadata(poly_data, dimensional_calibrations=[calibration],
                                                             intensity_calibration=spectrum_xdata.intensity_calibration)
     return model_xdata
Example #30
0
 def test_create_rgba_sequence_should_work(self):
     """A sequence of RGB frames should display as uint8 data with 3 color components."""
     document_model = DocumentModel.DocumentModel()
     document_controller = DocumentController.DocumentController(
         self.app.ui, document_model, workspace_id="library")
     with contextlib.closing(document_controller):
         # four 64x64 RGB frames (is_sequence=True, 0 collection dims, 2 datum dims)
         rgb_sequence = (numpy.random.rand(4, 64, 64, 3) * 255).astype(numpy.uint8)
         sequence_descriptor = DataAndMetadata.DataDescriptor(True, 0, 2)
         sequence_xdata = DataAndMetadata.new_data_and_metadata(
             rgb_sequence, data_descriptor=sequence_descriptor)
         data_item = DataItem.new_data_item(sequence_xdata)
         document_model.append_data_item(data_item)
         display_item = document_model.get_display_item_for_data_item(data_item)
         display_data_channel = display_item.display_data_channels[0]
         self.assertEqual(
             display_data_channel.get_calculated_display_values(
                 True).display_data_and_metadata.data_dtype, numpy.uint8)
         self.assertEqual(
             display_data_channel.get_calculated_display_values(
                 True).display_data_and_metadata.data_shape[-1], 3)