def execute(self, si_sequence_data_item: API_1_0.DataItem, haadf_sequence_data_item: API_1_0.DataItem, align_index: int, align_region: typing.Optional[API_1_0.Graphic] = None):
    """Align a paired HAADF/SI sequence using translations measured on the HAADF sequence.

    Measures the relative translation of every HAADF frame against the frame at
    align_index (optionally restricted to align_region's bounds), then shifts both
    the HAADF frames and the corresponding SI frames by the same translation. The
    SI shift tuple is padded with zeros for the datum dimensions so only the
    collection dimensions are shifted. Results are stored on the instance as
    aligned sequences; progress is reported via progress_updated_event.

    Fix: numpy.product is a deprecated alias (removed in NumPy 2.0); use numpy.prod.
    """
    haadf_xdata = haadf_sequence_data_item.xdata
    si_xdata = si_sequence_data_item.xdata
    bounds = None
    if align_region:
        bounds = align_region.bounds
    translations = Core.function_sequence_measure_relative_translation(haadf_xdata, haadf_xdata[align_index], 10, True, bounds=bounds)
    sequence_shape = haadf_sequence_data_item.xdata.sequence_dimension_shape
    # zero shift for each datum dimension of the SI data (only shift collection dims)
    data_zeros = (0,) * si_xdata.datum_dimension_count
    # total number of frames in the sequence (numpy.prod replaces deprecated numpy.product)
    c = int(numpy.prod(sequence_shape))
    haadf_result_data = numpy.empty_like(haadf_xdata.data)
    si_result_data = numpy.empty_like(si_xdata.data)
    for i in range(c):
        ii = numpy.unravel_index(i, sequence_shape)
        current_xdata = DataAndMetadata.new_data_and_metadata(haadf_xdata.data[ii])
        translation = translations.data[ii]
        haadf_result_data[ii] = Core.function_shift(current_xdata, tuple(translation)).data
        current_xdata = DataAndMetadata.new_data_and_metadata(si_xdata.data[ii])
        si_result_data[ii] = Core.function_shift(current_xdata, tuple(translation) + data_zeros).data
        self.progress_updated_event.fire(0, c, i + 1)
    self.__aligned_haadf_sequence = DataAndMetadata.new_data_and_metadata(haadf_result_data, intensity_calibration=haadf_xdata.intensity_calibration, dimensional_calibrations=haadf_xdata.dimensional_calibrations, metadata=haadf_xdata.metadata, data_descriptor=haadf_xdata.data_descriptor)
    self.__aligned_si_sequence = DataAndMetadata.new_data_and_metadata(si_result_data, intensity_calibration=si_xdata.intensity_calibration, dimensional_calibrations=si_xdata.dimensional_calibrations, metadata=si_xdata.metadata, data_descriptor=si_xdata.data_descriptor)
def acquire_sequence(self, n: int) -> typing.Optional[typing.Dict]:
    """Acquire a sequence of n frames by looping single-frame acquisition.

    Returns a data element dict with "data" (stacked frames, possibly processed)
    and "properties" (from the last frame), or None if the sequence was cancelled.
    Processing modes: "sum_project" collapses the first datum axis of each frame;
    "sum_masked" reduces each frame to per-mask sums (or a total sum without masks).
    """
    # if the device does not implement acquire_sequence, fall back to looping acquisition.
    self.__is_acquiring = True
    self.__has_data_event.clear()  # ensure any has_data_event is new data
    self.__instrument.sequence_progress = 0
    try:
        properties = None
        data = None
        for index in range(n):
            # honor cancellation between frames; __is_acquiring is reset in finally
            if self.__cancel_sequence_event.is_set():
                return None
            frame_data_element = self.__acquire_image(direct=True)
            frame_data = frame_data_element["data"]
            # determine the expected result shape/dtype for the selected processing mode
            if self.__processing == "sum_project" and len(frame_data.shape) > 1:
                data_shape = (n, ) + frame_data.shape[1:]
                data_dtype = frame_data.dtype
            elif self.__processing == "sum_masked":
                # one sum per mask; a single column when no mask array is configured
                data_shape = (n, len(self.__mask_array) if self.__mask_array is not None else 1)
                data_dtype = frame_data.dtype
            else:
                data_shape = (n, ) + frame_data.shape
                data_dtype = frame_data.dtype
            # allocate on first frame; subsequent frames must match shape/dtype
            if data is None:
                data = numpy.zeros(data_shape, data_dtype)
            assert data.shape == data_shape
            assert data.dtype == data_dtype
            if self.__processing == "sum_project" and len(frame_data.shape) > 1:
                data[index] = Core.function_sum(DataAndMetadata.new_data_and_metadata(frame_data), 0).data
            elif self.__processing == "sum_masked":
                if self.__mask_array is not None:
                    # broadcast multiply frame against each mask, then sum over the frame axes
                    data[index] = Core.function_sum(DataAndMetadata.new_data_and_metadata(frame_data * self.__mask_array), (1, 2)).data
                else:
                    data[index] = numpy.sum(frame_data)
            else:
                data[index] = frame_data
            # keep a private copy of the last frame's properties for the result
            properties = copy.deepcopy(frame_data_element["properties"])
            if self.__processing == "sum_project":
                properties["valid_rows"] = 1
                # drop the projected axis's calibration to match the reduced data
                spatial_properties = properties.get("spatial_calibrations")
                if spatial_properties is not None:
                    properties["spatial_calibrations"] = spatial_properties[1:]
            self.__instrument.increment_sequence_progress()
    finally:
        self.__is_acquiring = False
    data_element = dict()
    data_element["data"] = data
    data_element["properties"] = properties
    return data_element
def execute(self, si_sequence_data_item: API_1_0.DataItem, haadf_sequence_data_item: API_1_0.DataItem, align_index: int, align_region: API_1_0.Graphic):
    """Align a paired HAADF/SI sequence, locating the HAADF-shaped axes in the SI data.

    Measures translations of the HAADF frames relative to the frame at align_index
    (restricted to align_region's bounds), then applies the same translation to the
    SI data along the pair of axes whose sizes match the HAADF datum shape; all
    other SI axes get zero shift. Results are stored on the instance as aligned
    sequences.

    Raises RuntimeError if no pair of SI axes matches the HAADF datum shape.

    Fix: numpy.product is a deprecated alias (removed in NumPy 2.0); use numpy.prod.
    """
    haadf_xdata = haadf_sequence_data_item.xdata
    si_xdata = si_sequence_data_item.xdata
    bounds = align_region.bounds
    translations = Core.function_sequence_measure_relative_translation(haadf_xdata, haadf_xdata[align_index], 10, True, bounds=bounds)
    sequence_shape = haadf_sequence_data_item.xdata.sequence_dimension_shape
    # total number of frames in the sequence (numpy.prod replaces deprecated numpy.product)
    c = int(numpy.prod(sequence_shape))
    haadf_result_data = numpy.empty_like(haadf_xdata.data)
    si_result_data = numpy.empty_like(si_xdata.data)
    # find the consecutive axis pair in the SI data that matches the HAADF datum shape
    align_data_shape = haadf_sequence_data_item.xdata.datum_dimension_shape
    align_axes_start_index = None
    for i in range(len(si_xdata.data_shape) - 1):
        if align_data_shape == si_xdata.data_shape[i:i + 2]:
            align_axes_start_index = i
            break
    else:
        raise RuntimeError('Could not find axes that match the HAADF shape in SI data item.')
    # shift tuple for a single SI frame (sequence axes are indexed away below)
    si_translation = [0.0] * (len(si_xdata.data_shape) - len(sequence_shape))
    align_axes_start_index -= len(sequence_shape)
    assert align_axes_start_index >= 0
    for i in range(c):
        ii = numpy.unravel_index(i, sequence_shape)
        current_xdata = DataAndMetadata.new_data_and_metadata(haadf_xdata.data[ii])
        translation = translations.data[ii]
        haadf_result_data[ii] = Core.function_shift(current_xdata, tuple(translation)).data
        current_xdata = DataAndMetadata.new_data_and_metadata(si_xdata.data[ii])
        # apply the measured 2d translation only along the matched axis pair
        si_translation[align_axes_start_index] = translation[0]
        si_translation[align_axes_start_index + 1] = translation[1]
        si_result_data[ii] = Core.function_shift(current_xdata, tuple(si_translation)).data
    self.__aligned_haadf_sequence = DataAndMetadata.new_data_and_metadata(haadf_result_data, intensity_calibration=haadf_xdata.intensity_calibration, dimensional_calibrations=haadf_xdata.dimensional_calibrations, metadata=haadf_xdata.metadata, data_descriptor=haadf_xdata.data_descriptor)
    self.__aligned_si_sequence = DataAndMetadata.new_data_and_metadata(si_result_data, intensity_calibration=si_xdata.intensity_calibration, dimensional_calibrations=si_xdata.dimensional_calibrations, metadata=si_xdata.metadata, data_descriptor=si_xdata.data_descriptor)
def calculate_region_data(display_data_and_metadata: typing.Optional[DataAndMetadata.DataAndMetadata], region: typing.Optional[Graphics.Graphic]) -> typing.Optional[DataAndMetadata.DataAndMetadata]:
    """Return the data cropped to the given region, falling back to the full data.

    Supports an interval graphic on 1d data and a rectangle-type graphic on 2d
    data. Any other combination, an out-of-range/empty interval, or a failed
    crop returns the uncropped data. Returns None only when
    display_data_and_metadata is None.

    Fix: the body explicitly handles region being None, so the parameter is
    annotated Optional to match (PEP 484 prohibits implicit Optional).
    """
    if region is not None and display_data_and_metadata is not None:
        if display_data_and_metadata.is_data_1d and isinstance(region, Graphics.IntervalGraphic):
            interval = region.interval
            # only crop when the normalized interval lies within (0, 1]
            if 0 <= interval[0] < 1 and 0 < interval[1] <= 1:
                start, end = int(interval[0] * display_data_and_metadata.data_shape[0]), int(interval[1] * display_data_and_metadata.data_shape[0])
                # require at least one sample in the cropped result
                if end - start >= 1:
                    cropped_data_and_metadata = Core.function_crop_interval(display_data_and_metadata, interval)
                    if cropped_data_and_metadata:
                        return cropped_data_and_metadata
        elif display_data_and_metadata.is_data_2d and isinstance(region, Graphics.RectangleTypeGraphic):
            cropped_data_and_metadata = Core.function_crop(display_data_and_metadata, region.bounds.as_tuple())
            if cropped_data_and_metadata:
                return cropped_data_and_metadata
    return display_data_and_metadata
def affine_transform(
        data_and_metadata: DataAndMetadata.DataAndMetadata,
        transformation_matrix: numpy.ndarray,
        order: int = 1) -> typing.Optional[DataAndMetadata.DataAndMetadata]:
    """Apply an affine transformation to the data.

    Thin wrapper that delegates to Core.function_affine_transform, forwarding
    the interpolation order.
    """
    transformed = Core.function_affine_transform(data_and_metadata, transformation_matrix, order=order)
    return transformed
def integrate_signal(self, *, spectrum_xdata: DataAndMetadata.DataAndMetadata, fit_intervals: typing.Sequence[BackgroundInterval], signal_interval: BackgroundInterval, **kwargs) -> typing.Dict:
    """Integrate the background-subtracted spectrum.

    Fits a background over fit_intervals, subtracts it from spectrum_xdata, and
    integrates the remainder (trapezoidal rule over the last axis). Returns
    {"integrated": xdata} when the input is navigable (one value per navigation
    position, carrying the navigation calibrations) or {"integrated_value": value}
    for a single spectrum.
    """
    background = self.__fit_background(spectrum_xdata, fit_intervals, signal_interval)
    net_xdata = Core.calibrated_subtract_spectrum(spectrum_xdata, background)
    assert net_xdata
    net_data = net_xdata.data
    assert net_data is not None
    integrated = numpy.trapz(net_data)
    if not spectrum_xdata.is_navigable:
        return {"integrated_value": integrated}
    return {
        "integrated": DataAndMetadata.new_data_and_metadata(
            integrated,
            dimensional_calibrations=spectrum_xdata.navigation_dimensional_calibrations)
    }
def get_scan_data(self, frame_parameters, channel) -> numpy.ndarray:
    """Generate simulated scan data for the given frame parameters.

    Plots sample features into an oversized buffer (padded so a rotated crop
    still covers the full frame), crops to the requested size (rotated crop when
    rotation_rad is nonzero), then adds Gaussian noise and scales by the pixel
    dwell time.

    Fix: Core.function_crop_rotated can return an array whose shape differs from
    the requested (size.height, size.width) by a pixel (the original TODO), which
    made the noise addition below fail to broadcast; the result is now padded or
    trimmed to the exact requested size.
    """
    # output size in pixels: subscan size when subscanning, otherwise the full frame
    size = Geometry.IntSize.make(frame_parameters.subscan_pixel_size if frame_parameters.subscan_pixel_size else frame_parameters.size)
    offset_m = self.stage_position_m - self.GetVal2D("beam_shift_m")
    fov_size_nm = Geometry.FloatSize.make(frame_parameters.fov_size_nm) if frame_parameters.fov_size_nm else Geometry.FloatSize(frame_parameters.fov_nm, frame_parameters.fov_nm)
    if frame_parameters.subscan_fractional_size:
        subscan_fractional_size = Geometry.FloatSize.make(frame_parameters.subscan_fractional_size)
        used_fov_size_nm = Geometry.FloatSize(height=fov_size_nm.height * subscan_fractional_size.height, width=fov_size_nm.width * subscan_fractional_size.width)
    else:
        used_fov_size_nm = fov_size_nm
    center_nm = Geometry.FloatPoint.make(frame_parameters.center_nm)
    if frame_parameters.subscan_fractional_center:
        subscan_fractional_center = Geometry.FloatPoint.make(frame_parameters.subscan_fractional_center)
        center_nm += Geometry.FloatPoint(y=(subscan_fractional_center.y - 0.5) * fov_size_nm.height, x=(subscan_fractional_center.x - 0.5) * fov_size_nm.width)
    # pad enough pixels so that a rotation by any angle (sqrt(2) diagonal) still covers the frame
    extra = int(math.ceil(max(size.height * math.sqrt(2) - size.height, size.width * math.sqrt(2) - size.width)))
    extra_nm = Geometry.FloatPoint(y=(extra / size.height) * used_fov_size_nm[0], x=(extra / size.width) * used_fov_size_nm[1])
    used_size = size + Geometry.IntSize(height=extra, width=extra)
    data = numpy.zeros((used_size.height, used_size.width), numpy.float32)
    self.sample.plot_features(data, offset_m, used_fov_size_nm, extra_nm, center_nm, used_size)
    noise_factor = 0.3
    if frame_parameters.rotation_rad != 0:
        # crop a rotated interior rectangle (normalized bounds) out of the padded buffer
        inner_height = size.height / used_size.height
        inner_width = size.width / used_size.width
        inner_bounds = ((1.0 - inner_height) * 0.5, (1.0 - inner_width) * 0.5), (inner_height, inner_width)
        data = Core.function_crop_rotated(DataAndMetadata.new_data_and_metadata(data), inner_bounds, -frame_parameters.rotation_rad).data
        # rounding in the rotated crop can leave the shape off by a pixel; force the exact size
        if data.shape != (size.height, size.width):
            exact = numpy.zeros((size.height, size.width), numpy.float32)
            h = min(size.height, data.shape[0])
            w = min(size.width, data.shape[1])
            exact[:h, :w] = data[:h, :w]
            data = exact
    else:
        data = data[extra // 2:extra // 2 + size.height, extra // 2:extra // 2 + size.width]
    return (data + numpy.random.randn(size.height, size.width) * noise_factor) * frame_parameters.pixel_time_us
def align(
        src: DataAndMetadata.DataAndMetadata,
        target: DataAndMetadata.DataAndMetadata,
        upsample_factor: int = 1,
        bounds: typing.Optional[typing.Union[Core.NormRectangleType, Core.NormIntervalType]] = None
) -> typing.Optional[DataAndMetadata.DataAndMetadata]:
    """Align src to target, optionally restricting registration to bounds.

    Delegates to Core.function_align.

    Fix: bounds defaults to None, so its annotation must be Optional
    (PEP 484 prohibits implicit Optional).
    """
    return Core.function_align(src, target, upsample_factor, bounds=bounds)
def register_translation(
        xdata1: DataAndMetadata.DataAndMetadata,
        xdata2: DataAndMetadata.DataAndMetadata,
        upsample_factor: int = 1,
        subtract_means: bool = True) -> typing.Tuple[float, ...]:
    """Measure the translation between two data sets.

    Thin wrapper that delegates to Core.function_register.
    """
    offsets = Core.function_register(xdata1, xdata2, upsample_factor, subtract_means)
    return offsets
def sequence_register_translation(
        src: DataAndMetadata.DataAndMetadata,
        upsample_factor: int = 1,
        subtract_means: bool = True,
        bounds: typing.Optional[typing.Union[Core.NormRectangleType, Core.NormIntervalType]] = None
) -> typing.Optional[DataAndMetadata.DataAndMetadata]:
    """Measure per-frame translations within a sequence.

    Delegates to Core.function_sequence_register_translation.

    Fix: the bounds argument was accepted but silently dropped — it was never
    forwarded to the Core function (compare the sibling wrappers which pass
    bounds=bounds). It is now passed through. Also annotates the None default
    as Optional (PEP 484).
    """
    return Core.function_sequence_register_translation(src, upsample_factor, subtract_means, bounds=bounds)
def transpose_flip(
        data_and_metadata: DataAndMetadata.DataAndMetadata,
        transpose: bool = False,
        flip_v: bool = False,
        flip_h: bool = False
) -> typing.Optional[DataAndMetadata.DataAndMetadata]:
    """Transpose and/or flip the data vertically/horizontally.

    Thin wrapper that delegates to Core.function_transpose_flip.
    """
    result = Core.function_transpose_flip(data_and_metadata, transpose, flip_v, flip_h)
    return result
def register_translation(
        xdata1: _DataAndMetadataLike,
        xdata2: _DataAndMetadataLike,
        upsample_factor: typing.Optional[int] = None,
        subtract_means: bool = True) -> typing.Tuple[float, ...]:
    """Measure the translation between two data sets.

    The upsample_factor argument is retained for backwards compatibility only;
    passing it emits a FutureWarning and it is otherwise ignored. Delegates to
    Core.function_register.
    """
    if upsample_factor is not None:
        message = "'upsample_factor' is deprecated and will be removed in a future release."
        warnings.warn(message, category=FutureWarning)
    return Core.function_register(xdata1, xdata2, subtract_means)
def get_scan_data(self, frame_parameters, channel) -> numpy.ndarray:
    """Generate simulated scan data for the given frame parameters.

    Plots sample features into an oversized buffer (padded so a rotated crop
    still covers the full frame), crops it to the requested size (a rotated
    crop when the combined scan/subscan rotation is nonzero), then adds
    Gaussian noise and scales by the pixel dwell time.
    """
    # output size in pixels: subscan size when subscanning, otherwise the full frame
    size = Geometry.IntSize.make(frame_parameters.subscan_pixel_size if frame_parameters.subscan_pixel_size else frame_parameters.size)
    offset_m = self.actual_offset_m  # stage position - beam shift + drift
    fov_size_nm = Geometry.FloatSize.make(frame_parameters.fov_size_nm) if frame_parameters.fov_size_nm else Geometry.FloatSize(frame_parameters.fov_nm, frame_parameters.fov_nm)
    if frame_parameters.subscan_fractional_size:
        subscan_fractional_size = Geometry.FloatSize.make(frame_parameters.subscan_fractional_size)
        used_fov_size_nm = Geometry.FloatSize(height=fov_size_nm.height * subscan_fractional_size.height, width=fov_size_nm.width * subscan_fractional_size.width)
    else:
        used_fov_size_nm = fov_size_nm
    center_nm = Geometry.FloatPoint.make(frame_parameters.center_nm)
    if frame_parameters.subscan_fractional_center:
        # rotate the fractional subscan center (relative to frame center) into the scan frame
        subscan_fractional_center = Geometry.FloatPoint.make(frame_parameters.subscan_fractional_center) - Geometry.FloatPoint(y=0.5, x=0.5)
        fc = subscan_fractional_center.rotate(frame_parameters.rotation_rad)
        center_nm += Geometry.FloatPoint(y=fc.y * fov_size_nm.height, x=fc.x * fov_size_nm.width)
    # pad enough pixels so that a rotation by any angle (sqrt(2) diagonal) still covers the frame
    extra = int(math.ceil(max(size.height * math.sqrt(2) - size.height, size.width * math.sqrt(2) - size.width)))
    extra_nm = Geometry.FloatPoint(y=(extra / size.height) * used_fov_size_nm[0], x=(extra / size.width) * used_fov_size_nm[1])
    used_size = size + Geometry.IntSize(height=extra, width=extra)
    data = numpy.zeros((used_size.height, used_size.width), numpy.float32)
    self.sample.plot_features(data, offset_m, used_fov_size_nm, extra_nm, center_nm, used_size)
    noise_factor = 0.3
    # the effective rotation combines the scan rotation and (negated) subscan rotation
    total_rotation = frame_parameters.rotation_rad
    if frame_parameters.subscan_rotation:
        total_rotation -= frame_parameters.subscan_rotation
    if total_rotation != 0:
        # crop a rotated interior rectangle (normalized bounds) out of the padded buffer
        inner_height = size.height / used_size.height
        inner_width = size.width / used_size.width
        inner_bounds = ((1.0 - inner_height) * 0.5, (1.0 - inner_width) * 0.5), (inner_height, inner_width)
        data = Core.function_crop_rotated(DataAndMetadata.new_data_and_metadata(data), inner_bounds, -total_rotation).data
    else:
        # no rotation: a plain centered slice of the padded buffer suffices
        data = data[extra // 2:extra // 2 + size.height, extra // 2:extra // 2 + size.width]
    return (data + numpy.random.randn(size.height, size.width) * noise_factor) * frame_parameters.pixel_time_us
def execute(self, eels_spectrum_data_item, background_model, fit_interval_graphics, **kwargs) -> None:
    """Compute a background model and background-subtracted spectrum for an EELS spectrum.

    Looks up the background-model component registered for the background_model
    entity and uses it to fit over the graphics' intervals; results are stored on
    the instance. When no matching component produces a result, falls back to a
    zero background and the unmodified signal slice. The signal interval spans
    from the lowest fit-interval start to the end of the spectrum.
    """
    try:
        spectrum_xdata = eels_spectrum_data_item.xdata
        assert spectrum_xdata.is_datum_1d
        assert spectrum_xdata.datum_dimensional_calibrations[0].units == "eV"
        eels_spectrum_xdata = spectrum_xdata
        # fit_interval_graphics.interval returns normalized coordinates. create calibrated intervals.
        fit_intervals: typing.List[BackgroundModel.BackgroundInterval] = list()
        for fit_interval_graphic in fit_interval_graphics:
            fit_intervals.append(fit_interval_graphic.interval)
        # the signal region runs from the earliest fit interval to the end of the spectrum
        fit_minimum = min([fit_interval[0] for fit_interval in fit_intervals])
        signal_interval = fit_minimum, 1.0
        signal_xdata = BackgroundModel.get_calibrated_interval_slice(eels_spectrum_xdata, signal_interval)
        background_xdata = None
        subtracted_xdata = None
        if background_model._data_structure.entity:
            # find the registered background-model component matching this entity
            entity_id = background_model._data_structure.entity.entity_type.entity_id
            for component in Registry.get_components_by_type("background-model"):
                if entity_id == component.background_model_id:
                    fit_result = component.fit_background(spectrum_xdata=spectrum_xdata, fit_intervals=fit_intervals, background_interval=signal_interval)
                    background_xdata = fit_result["background_model"]
                    # use 'or' to avoid doing subtraction if subtracted_spectrum already present
                    subtracted_xdata = fit_result.get("subtracted_spectrum", None) or Core.calibrated_subtract_spectrum(spectrum_xdata, background_xdata)
        # fall back to a zero background over the signal slice when no component matched
        if background_xdata is None:
            background_xdata = DataAndMetadata.new_data_and_metadata(numpy.zeros_like(signal_xdata.data), intensity_calibration=signal_xdata.intensity_calibration, dimensional_calibrations=signal_xdata.dimensional_calibrations)
        # fall back to the unmodified signal slice when no subtraction was performed
        if subtracted_xdata is None:
            subtracted_xdata = DataAndMetadata.new_data_and_metadata(signal_xdata.data, intensity_calibration=signal_xdata.intensity_calibration, dimensional_calibrations=signal_xdata.dimensional_calibrations)
        self.__background_xdata = background_xdata
        self.__subtracted_xdata = subtracted_xdata
    except Exception as e:
        # boundary handler: log the full traceback for diagnosis, then re-raise
        import traceback
        print(traceback.format_exc())
        print(e)
        raise
def sequence_fourier_align(
        src: _DataAndMetadataLike,
        upsample_factor: typing.Optional[int] = None,
        bounds: typing.Optional[typing.Union[Core.NormRectangleType, Core.NormIntervalType]] = None
) -> DataAndMetadata.DataAndMetadata:
    """Align the frames of a sequence using Fourier-space registration.

    The upsample_factor argument is retained for backwards compatibility only;
    passing it emits a FutureWarning and it is otherwise ignored. Delegates to
    Core.function_sequence_fourier_align.
    """
    if upsample_factor is not None:
        message = "'upsample_factor' is deprecated and will be removed in a future release."
        warnings.warn(message, category=FutureWarning)
    return Core.function_sequence_fourier_align(src, bounds=bounds)
def sequence_measure_relative_translation(
        src: _DataAndMetadataLike,
        ref: _DataAndMetadataLike,
        upsample_factor: typing.Optional[int] = None,
        subtract_means: bool = True,
        bounds: typing.Optional[typing.Union[Core.NormRectangleType, Core.NormIntervalType]] = None
) -> DataAndMetadata.DataAndMetadata:
    """Measure the translation of each frame in a sequence relative to ref.

    Delegates to Core.function_sequence_measure_relative_translation.

    Fix (consistency): the sibling wrappers treat upsample_factor as deprecated
    and emit a FutureWarning when it is passed; this wrapper silently ignored it.
    It now emits the same warning.
    """
    if upsample_factor is not None:
        warnings.warn("'upsample_factor' is deprecated and will be removed in a future release.", category=FutureWarning)
    return Core.function_sequence_measure_relative_translation(src, ref, subtract_means, bounds=bounds)
def subtract_background(self, *, spectrum_xdata: DataAndMetadata.DataAndMetadata, fit_intervals: typing.Sequence[BackgroundInterval], **kwargs) -> typing.Dict:
    """Fit a background over fit_intervals and subtract it from the spectrum.

    The signal interval runs from the lowest fit-interval start to the end of
    the spectrum. Returns {"subtracted": xdata}.
    """
    lowest_fit_start = min(fit_interval[0] for fit_interval in fit_intervals)
    signal_interval = lowest_fit_start, 1.0
    background = self.__fit_background(spectrum_xdata, fit_intervals, signal_interval)
    subtracted = Core.calibrated_subtract_spectrum(spectrum_xdata, background)
    assert subtracted
    return {"subtracted": subtracted}
def __acquire_sequence(self, n: int, frame_parameters) -> dict:
    """Acquire a sequence of n frames, preferring the device's native sequence support.

    If the camera implements acquire_sequence and it succeeds, its data element is
    returned directly. Otherwise falls back to looping single-frame acquisition via
    a CameraAcquisitionTask, stacking frames (with optional "sum_project" processing)
    into a data element dict with "data" and "properties" keys.
    """
    # prefer the device-native sequence acquisition when available
    if callable(getattr(self.__camera, "acquire_sequence", None)):
        data_element = self.__camera.acquire_sequence(n)
        if data_element is not None:
            return data_element
    # if the device does not implement acquire_sequence, fall back to looping acquisition.
    processing = frame_parameters.processing
    acquisition_task = CameraAcquisitionTask(self.__get_stem_controller(), self.hardware_source_id, True, self.__camera, self.__camera_category, frame_parameters, self.display_name)
    acquisition_task._start_acquisition()
    try:
        properties = None
        data = None
        for index in range(n):
            frame_data_element = acquisition_task._acquire_data_elements()[0]
            frame_data = frame_data_element["data"]
            # allocate the result stack lazily, once the first frame's shape is known
            if data is None:
                if processing == "sum_project" and len(frame_data.shape) > 1:
                    # sum_project collapses the first axis of each frame
                    data = numpy.empty((n, ) + frame_data.shape[1:], frame_data.dtype)
                else:
                    data = numpy.empty((n, ) + frame_data.shape, frame_data.dtype)
            if processing == "sum_project" and len(frame_data.shape) > 1:
                data[index] = Core.function_sum(DataAndMetadata.new_data_and_metadata(frame_data), 0).data
            else:
                data[index] = frame_data
            # keep a private copy of the last frame's properties for the result
            properties = copy.deepcopy(frame_data_element["properties"])
            if processing == "sum_project":
                properties["valid_rows"] = 1
                # drop the projected axis's calibration to match the reduced data
                spatial_properties = properties.get("spatial_calibrations")
                if spatial_properties is not None:
                    properties["spatial_calibrations"] = spatial_properties[1:]
    finally:
        acquisition_task._stop_acquisition()
    data_element = dict()
    data_element["data"] = data
    data_element["properties"] = properties
    return data_element
def execute(self, eels_spectrum_data_item, zlp_model, **kwargs) -> None:
    """Compute a zero-loss-peak model and ZLP-subtracted spectrum for an EELS spectrum.

    Looks up the zlp-model component registered for the zlp_model entity and uses
    it to fit the zero-loss peak; results are stored on the instance. When no
    matching component produces a result, falls back to a zero model and the
    unmodified spectrum.
    """
    try:
        spectrum_xdata = eels_spectrum_data_item.xdata
        assert spectrum_xdata.is_datum_1d
        assert spectrum_xdata.datum_dimensional_calibrations[0].units == "eV"
        eels_spectrum_xdata = spectrum_xdata
        model_xdata = None
        subtracted_xdata = None
        if zlp_model._data_structure.entity:
            # find the registered zlp-model component matching this entity
            entity_id = zlp_model._data_structure.entity.entity_type.entity_id
            for component in Registry.get_components_by_type("zlp-model"):
                # print(f"{entity_id=} {component.zero_loss_peak_model_id=}")
                if entity_id == component.zero_loss_peak_model_id:
                    fit_result = component.fit_zero_loss_peak(spectrum_xdata=spectrum_xdata)
                    model_xdata = fit_result["zero_loss_peak_model"]
                    # use 'or' to avoid doing subtraction if subtracted_spectrum already present
                    subtracted_xdata = fit_result.get("subtracted_spectrum", None) or Core.calibrated_subtract_spectrum(spectrum_xdata, model_xdata)
        # fall back to a zero model when no component matched
        if model_xdata is None:
            model_xdata = DataAndMetadata.new_data_and_metadata(numpy.zeros_like(eels_spectrum_xdata.data), intensity_calibration=eels_spectrum_xdata.intensity_calibration, dimensional_calibrations=eels_spectrum_xdata.dimensional_calibrations)
        # fall back to the unmodified spectrum when no subtraction was performed
        if subtracted_xdata is None:
            subtracted_xdata = DataAndMetadata.new_data_and_metadata(eels_spectrum_xdata.data, intensity_calibration=eels_spectrum_xdata.intensity_calibration, dimensional_calibrations=eels_spectrum_xdata.dimensional_calibrations)
        self.__model_xdata = model_xdata
        self.__subtracted_xdata = subtracted_xdata
    except Exception as e:
        # boundary handler: log the full traceback for diagnosis, then re-raise
        import traceback
        print(traceback.format_exc())
        print(e)
        raise
def crop_rotated(data_and_metadata: DataAndMetadata.DataAndMetadata, bounds: Core.NormRectangleType, angle: float) -> typing.Optional[DataAndMetadata.DataAndMetadata]:
    """Crop a rotated rectangular region out of the data.

    Thin wrapper that delegates to Core.function_crop_rotated.
    """
    cropped = Core.function_crop_rotated(data_and_metadata, bounds, angle)
    return cropped
def warp(data_and_metadata: DataAndMetadata.DataAndMetadata, coordinates: typing.Sequence[DataAndMetadata.DataAndMetadata], order: int = 1) -> typing.Optional[DataAndMetadata.DataAndMetadata]:
    """Warp the data by sampling it at the given coordinate fields.

    Delegates to Core.function_warp.

    Fix: the order (interpolation order) argument was accepted but never
    forwarded to the Core function — compare the affine_transform wrapper,
    which forwards order=order. It is now passed through.
    """
    return Core.function_warp(data_and_metadata, coordinates, order=order)
def resample_image(data_and_metadata: DataAndMetadata.DataAndMetadata, shape: DataAndMetadata.ShapeType) -> typing.Optional[DataAndMetadata.DataAndMetadata]:
    """Resample 2d data to the given shape.

    Thin wrapper that delegates to Core.function_resample_2d.
    """
    resampled = Core.function_resample_2d(data_and_metadata, shape)
    return resampled
def average_region(data_and_metadata: DataAndMetadata.DataAndMetadata, mask_data_and_metadata: DataAndMetadata.DataAndMetadata) -> typing.Optional[DataAndMetadata.DataAndMetadata]:
    """Average the data over the region selected by the mask.

    Thin wrapper that delegates to Core.function_average_region.
    """
    averaged = Core.function_average_region(data_and_metadata, mask_data_and_metadata)
    return averaged
def mean(data_and_metadata: DataAndMetadata.DataAndMetadata, axis: typing.Optional[typing.Union[int, typing.Sequence[int]]] = None, keepdims: bool = False) -> typing.Optional[DataAndMetadata.DataAndMetadata]:
    """Compute the mean over the given axis (or all axes when axis is None).

    Delegates to Core.function_mean.

    Fix: axis defaults to None, so its annotation must be Optional
    (PEP 484 prohibits implicit Optional).
    """
    return Core.function_mean(data_and_metadata, axis, keepdims=keepdims)
def pick(data_and_metadata: DataAndMetadata.DataAndMetadata, position: DataAndMetadata.PositionType) -> typing.Optional[DataAndMetadata.DataAndMetadata]:
    """Pick the data at the given position.

    Thin wrapper that delegates to Core.function_pick.
    """
    picked = Core.function_pick(data_and_metadata, position)
    return picked
def slice_sum(data_and_metadata: DataAndMetadata.DataAndMetadata, slice_center: int, slice_width: int) -> typing.Optional[DataAndMetadata.DataAndMetadata]:
    """Sum the data over a slice of the given center and width.

    Thin wrapper that delegates to Core.function_slice_sum.
    """
    summed = Core.function_slice_sum(data_and_metadata, slice_center, slice_width)
    return summed
def crop(data_and_metadata: DataAndMetadata.DataAndMetadata, bounds: Core.NormRectangleType) -> typing.Optional[DataAndMetadata.DataAndMetadata]:
    """Crop the data to the given normalized rectangular bounds.

    Thin wrapper that delegates to Core.function_crop.
    """
    cropped = Core.function_crop(data_and_metadata, bounds)
    return cropped
def rescale(data_and_metadata: DataAndMetadata.DataAndMetadata, data_range: typing.Optional[Core.DataRangeType] = None) -> typing.Optional[DataAndMetadata.DataAndMetadata]:
    """Rescale the data to the given range.

    Delegates to Core.function_rescale.

    Fix: data_range defaults to None, so its annotation must be Optional
    (PEP 484 prohibits implicit Optional).
    """
    return Core.function_rescale(data_and_metadata, data_range)
def resize(data_and_metadata: DataAndMetadata.DataAndMetadata, shape: DataAndMetadata.ShapeType, mode=None) -> typing.Optional[DataAndMetadata.DataAndMetadata]:
    """Resize the data to the given shape.

    Thin wrapper that delegates to Core.function_resize, forwarding mode.
    """
    resized = Core.function_resize(data_and_metadata, shape, mode)
    return resized
def crop_interval(data_and_metadata: DataAndMetadata.DataAndMetadata, interval: Core.NormIntervalType) -> typing.Optional[DataAndMetadata.DataAndMetadata]:
    """Crop 1d data to the given normalized interval.

    Thin wrapper that delegates to Core.function_crop_interval.
    """
    cropped = Core.function_crop_interval(data_and_metadata, interval)
    return cropped