Example #1
 def __init__(self, document_model: DocumentModel.DocumentModel, scan_hardware_source: scan_base.ScanHardwareSource, scan_frame_parameters: scan_base.ScanFrameParameters):
     # init with the frame parameters from the synchronized grab
     self.__document_model = document_model
     self.__scan_hardware_source = scan_hardware_source
     self.__scan_frame_parameters = copy.deepcopy(scan_frame_parameters)
     # here we convert those frame parameters to the context
     self.__scan_frame_parameters.subscan_pixel_size = None
     self.__scan_frame_parameters.subscan_fractional_size = None
     self.__scan_frame_parameters.subscan_fractional_center = None
     self.__scan_frame_parameters.subscan_rotation = 0.0
     self.__scan_frame_parameters.channel_override = "drift"
     self.__last_xdata = None
     self.__center_nm = Geometry.FloatSize()
     self.__last_offset_nm = Geometry.FloatSize()
     self.__offset_nm_data = numpy.zeros((3, 0), float)
     data_item = next(iter(data_item for data_item in document_model.data_items if data_item.title == "Drift Log"), None)
     if data_item:
         offset_nm_xdata = DataAndMetadata.new_data_and_metadata(self.__offset_nm_data, intensity_calibration=Calibration.Calibration(units="nm"))
         data_item.set_data_and_metadata(offset_nm_xdata)
     else:
         data_item = DataItem.DataItem(self.__offset_nm_data)
          data_item.title = "Drift Log"
         self.__document_model.append_data_item(data_item)
         display_item = self.__document_model.get_display_item_for_data_item(data_item)
         display_item.display_type = "line_plot"
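          # the display item starts with one display data channel for the data item; append two more so that
          # three display layers (labeled below as x, y, and m) are available for the three rows of drift data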
         display_item.append_display_data_channel_for_data_item(data_item)
         display_item.append_display_data_channel_for_data_item(data_item)
         display_item._set_display_layer_properties(0, label=_("x"))
         display_item._set_display_layer_properties(1, label=_("y"))
         display_item._set_display_layer_properties(2, label=_("m"))
     self.__data_item = data_item
Example #2
 def get_scan_data(self, frame_parameters, channel) -> numpy.ndarray:
     size = Geometry.IntSize.make(
         frame_parameters.subscan_pixel_size if frame_parameters.subscan_pixel_size else frame_parameters.size)
     offset_m = self.actual_offset_m  # stage position - beam shift + drift
     fov_size_nm = (Geometry.FloatSize.make(frame_parameters.fov_size_nm) if frame_parameters.fov_size_nm
                    else Geometry.FloatSize(frame_parameters.fov_nm, frame_parameters.fov_nm))
     if frame_parameters.subscan_fractional_size:
         subscan_fractional_size = Geometry.FloatSize.make(
             frame_parameters.subscan_fractional_size)
         used_fov_size_nm = Geometry.FloatSize(
             height=fov_size_nm.height * subscan_fractional_size.height,
             width=fov_size_nm.width * subscan_fractional_size.width)
     else:
         used_fov_size_nm = fov_size_nm
     center_nm = Geometry.FloatPoint.make(frame_parameters.center_nm)
     if frame_parameters.subscan_fractional_center:
         subscan_fractional_center = Geometry.FloatPoint.make(
             frame_parameters.subscan_fractional_center
         ) - Geometry.FloatPoint(y=0.5, x=0.5)
         fc = subscan_fractional_center.rotate(
             frame_parameters.rotation_rad)
         center_nm += Geometry.FloatPoint(y=fc.y * fov_size_nm.height,
                                          x=fc.x * fov_size_nm.width)
     # pad the simulated area so that a crop of the requested size still fits after rotation
     # (sqrt(2) covers the worst case of a 45 degree rotation)
     extra = int(math.ceil(max(size.height * math.sqrt(2) - size.height,
                               size.width * math.sqrt(2) - size.width)))
     extra_nm = Geometry.FloatPoint(
         y=(extra / size.height) * used_fov_size_nm[0],
         x=(extra / size.width) * used_fov_size_nm[1])
     used_size = size + Geometry.IntSize(height=extra, width=extra)
     data = numpy.zeros((used_size.height, used_size.width), numpy.float32)
     self.sample.plot_features(data, offset_m, used_fov_size_nm, extra_nm,
                               center_nm, used_size)
     noise_factor = 0.3
     total_rotation = frame_parameters.rotation_rad
     if frame_parameters.subscan_rotation:
         total_rotation -= frame_parameters.subscan_rotation
     if total_rotation != 0:
         inner_height = size.height / used_size.height
         inner_width = size.width / used_size.width
         inner_bounds = ((1.0 - inner_height) * 0.5, (1.0 - inner_width) * 0.5), (inner_height, inner_width)
         data = Core.function_crop_rotated(
             DataAndMetadata.new_data_and_metadata(data), inner_bounds,
             -total_rotation).data
     else:
         data = data[extra // 2:extra // 2 + size.height,
                     extra // 2:extra // 2 + size.width]
     return (data + numpy.random.randn(size.height, size.width) *
             noise_factor) * frame_parameters.pixel_time_us
Example #3
 def get_scan_data(self, frame_parameters, channel) -> numpy.ndarray:
     size = Geometry.IntSize.make(frame_parameters.subscan_pixel_size if frame_parameters.subscan_pixel_size else frame_parameters.size)
     offset_m = self.stage_position_m - self.GetVal2D("beam_shift_m")
     fov_size_nm = Geometry.FloatSize.make(frame_parameters.fov_size_nm) if frame_parameters.fov_size_nm else Geometry.FloatSize(frame_parameters.fov_nm, frame_parameters.fov_nm)
     if frame_parameters.subscan_fractional_size:
         subscan_fractional_size = Geometry.FloatSize.make(frame_parameters.subscan_fractional_size)
         used_fov_size_nm = Geometry.FloatSize(height=fov_size_nm.height * subscan_fractional_size.height,
                                               width=fov_size_nm.width * subscan_fractional_size.width)
     else:
         used_fov_size_nm = fov_size_nm
     center_nm = Geometry.FloatPoint.make(frame_parameters.center_nm)
     if frame_parameters.subscan_fractional_center:
         subscan_fractional_center = Geometry.FloatPoint.make(frame_parameters.subscan_fractional_center)
         center_nm += Geometry.FloatPoint(y=(subscan_fractional_center.y - 0.5) * fov_size_nm.height,
                                          x=(subscan_fractional_center.x - 0.5) * fov_size_nm.width)
     extra = int(math.ceil(max(size.height * math.sqrt(2) - size.height, size.width * math.sqrt(2) - size.width)))
     extra_nm = Geometry.FloatPoint(y=(extra / size.height) * used_fov_size_nm[0], x=(extra / size.width) * used_fov_size_nm[1])
     used_size = size + Geometry.IntSize(height=extra, width=extra)
     data = numpy.zeros((used_size.height, used_size.width), numpy.float32)
     self.sample.plot_features(data, offset_m, used_fov_size_nm, extra_nm, center_nm, used_size)
     noise_factor = 0.3
     if frame_parameters.rotation_rad != 0:
         inner_height = size.height / used_size.height
         inner_width = size.width / used_size.width
         inner_bounds = ((1.0 - inner_height) * 0.5, (1.0 - inner_width) * 0.5), (inner_height, inner_width)
         data = Core.function_crop_rotated(DataAndMetadata.new_data_and_metadata(data), inner_bounds, -frame_parameters.rotation_rad).data
         # TODO: data is not always the correct size
     else:
         data = data[extra // 2:extra // 2 + size.height, extra // 2:extra // 2 + size.width]
     return (data + numpy.random.randn(size.height, size.width) * noise_factor) * frame_parameters.pixel_time_us
Example #4
 def __init__(self, stage_size_nm: float):
     self.__features = list()
     sample_size_m = Geometry.FloatSize(height=20 * stage_size_nm / 100, width=20 * stage_size_nm / 100) / 1E9
     feature_percentage = 0.3
     random_state = random.getstate()
     random.seed(1)
     energies = [[(68, 30), (855, 50), (872, 50)], [(29, 15), (1217, 50), (1248, 50)],
                 [(1839, 5), (99, 50)]]  # Ni, Ge, Si
     plasmons = [20, 16.2, 16.8]
     for i in range(100):
         position_m = Geometry.FloatPoint(y=(2 * random.random() - 1.0) * sample_size_m.height,
                                          x=(2 * random.random() - 1.0) * sample_size_m.width)
         size_m = feature_percentage * Geometry.FloatSize(height=random.random() * sample_size_m.height,
                                                          width=random.random() * sample_size_m.width)
         self.__features.append(
             Feature(position_m, size_m, energies[i % len(energies)], plasmons[i % len(plasmons)], 4))
     random.setstate(random_state)
Example #5
 def get_scan_rect_m(self, offset_m: Geometry.FloatPoint,
                     fov_nm: Geometry.FloatSize,
                     center_nm: Geometry.FloatPoint) -> Geometry.FloatRect:
     scan_size_m = Geometry.FloatSize(height=fov_nm.height,
                                      width=fov_nm.width) / 1E9
     scan_rect_m = Geometry.FloatRect.from_center_and_size(
         Geometry.FloatPoint.make(center_nm) / 1E9, scan_size_m)
     scan_rect_m -= offset_m
     return scan_rect_m
Example #6
 def prepare_section(self) -> scan_base.SynchronizedScanBehaviorAdjustments:
     # this method must be thread safe
     # start with the context frame parameters and adjust for the drift region
     adjustments = scan_base.SynchronizedScanBehaviorAdjustments()
     frame_parameters = copy.deepcopy(self.__scan_frame_parameters)
     context_size = Geometry.FloatSize.make(frame_parameters.size)
     drift_channel_id = self.__scan_hardware_source.drift_channel_id
      drift_region = Geometry.FloatRect.make(self.__scan_hardware_source.drift_region) if self.__scan_hardware_source.drift_region is not None else None
     drift_rotation = self.__scan_hardware_source.drift_rotation
     if drift_channel_id is not None and drift_region is not None:
         drift_channel_index = self.__scan_hardware_source.get_channel_index(drift_channel_id)
         frame_parameters.subscan_pixel_size = int(context_size.height * drift_region.height * 4), int(context_size.width * drift_region.width * 4)
         if frame_parameters.subscan_pixel_size[0] >= 8 or frame_parameters.subscan_pixel_size[1] >= 8:
             frame_parameters.subscan_fractional_size = drift_region.height, drift_region.width
             frame_parameters.subscan_fractional_center = drift_region.center.y, drift_region.center.x
             frame_parameters.subscan_rotation = drift_rotation
             # attempt to keep drift area in roughly the same position by adding in the accumulated correction.
             frame_parameters.center_nm = tuple(Geometry.FloatPoint.make(frame_parameters.center_nm) + self.__center_nm)
             xdatas = self.__scan_hardware_source.record_immediate(frame_parameters, [drift_channel_index])
             # new_offset = self.__scan_hardware_source.stem_controller.drift_offset_m
             if self.__last_xdata:
                 # calculate offset. if data shifts down/right, offset will be negative (register_translation convention).
                 # offset = Geometry.FloatPoint.make(xd.register_translation(self.__last_xdata, xdatas[0], upsample_factor=10))
                 quality, offset = xd.register_template(self.__last_xdata, xdatas[0])
                 offset = Geometry.FloatPoint.make(offset)
                 offset_nm = Geometry.FloatSize(
                     h=xdatas[0].dimensional_calibrations[0].convert_to_calibrated_size(offset.y),
                     w=xdatas[0].dimensional_calibrations[1].convert_to_calibrated_size(offset.x))
                 # calculate adjustment (center_nm). if center_nm positive, data shifts up/left.
                 # rotate back into context reference frame
                 offset_nm = offset_nm.rotate(-drift_rotation)
                 offset_nm -= self.__center_nm  # adjust for center_nm adjustment above
                 delta_nm = offset_nm - self.__last_offset_nm
                 self.__last_offset_nm = offset_nm
                 offset_nm_xy = math.sqrt(pow(offset_nm.height, 2) + pow(offset_nm.width, 2))
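                  # append one column per section: [y offset (height), x offset (width), offset magnitude], all in nm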
                 self.__offset_nm_data = numpy.hstack([self.__offset_nm_data, numpy.array([offset_nm.height, offset_nm.width, offset_nm_xy]).reshape(3, 1)])
                 offset_nm_xdata = DataAndMetadata.new_data_and_metadata(self.__offset_nm_data, intensity_calibration=Calibration.Calibration(units="nm"))
                 def update_data_item(offset_nm_xdata: DataAndMetadata.DataAndMetadata) -> None:
                     self.__data_item.set_data_and_metadata(offset_nm_xdata)
                 self.__scan_hardware_source._call_soon(functools.partial(update_data_item, offset_nm_xdata))
                 # report the difference from the last time we reported, but negative since center_nm positive shifts up/left
                 adjustments.offset_nm = -delta_nm
                 self.__center_nm -= delta_nm
                 # print(f"{self.__last_offset_nm} {adjustments.offset_nm} [{(new_offset - self.__last_offset) * 1E9}] {offset} {frame_parameters.fov_nm}")
                 if False:  # if offset_nm > drift area / 10?
                     # retake to provide reference at new offset
                     frame_parameters.center_nm = tuple(Geometry.FloatPoint.make(frame_parameters.center_nm) + adjustments.offset_nm)
                     self.__scan_frame_parameters.center_nm = frame_parameters.center_nm
                     xdatas = self.__scan_hardware_source.record_immediate(frame_parameters, [drift_channel_index])
                     new_offset = self.__scan_hardware_source.stem_controller.drift_offset_m
             else:
                 self.__last_xdata = xdatas[0]
     return adjustments
Example #7
 def setup_camera_hardware_source(
         self, stem_controller: stem_controller.STEMController,
         camera_exposure: float,
         is_eels: bool) -> HardwareSource.HardwareSource:
     instrument = typing.cast(InstrumentDevice.Instrument, stem_controller)
     camera_id = "usim_ronchigram_camera" if not is_eels else "usim_eels_camera"
     camera_type = "ronchigram" if not is_eels else "eels"
     camera_name = "uSim Camera"
     camera_settings = CameraDevice.CameraSettings(camera_id)
     camera_device = CameraDevice.Camera(camera_id, camera_type,
                                         camera_name, instrument)
     if getattr(camera_device, "camera_version", 2) == 3:
         camera_hardware_source = camera_base.CameraHardwareSource3(
             "usim_stem_controller", camera_device, camera_settings, None,
             None)
     else:
         camera_hardware_source = camera_base.CameraHardwareSource2(
             "usim_stem_controller", camera_device, camera_settings, None,
             None)
     if is_eels:
         camera_hardware_source.features["is_eels_camera"] = True
         camera_hardware_source.add_channel_processor(
             0,
             HardwareSource.SumProcessor(
                 Geometry.FloatRect(Geometry.FloatPoint(0.25, 0.0),
                                    Geometry.FloatSize(0.5, 1.0))))
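      # configure three frame parameter profiles: profiles 0 and 1 use the requested exposure at binning 2;
      # profile 2 uses twice the exposure at binning 1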
     camera_hardware_source.set_frame_parameters(
         0,
         camera_base.CameraFrameParameters({
             "exposure_ms": camera_exposure * 1000,
             "binning": 2
         }))
     camera_hardware_source.set_frame_parameters(
         1,
         camera_base.CameraFrameParameters({
             "exposure_ms": camera_exposure * 1000,
             "binning": 2
         }))
     camera_hardware_source.set_frame_parameters(
         2,
         camera_base.CameraFrameParameters({
             "exposure_ms":
             camera_exposure * 1000 * 2,
             "binning":
             1
         }))
     camera_hardware_source.set_selected_profile_index(0)
     return camera_hardware_source
Example #8
 def prepare_section(
         self,
         *,
         utc_time: typing.Optional[datetime.datetime] = None) -> None:
     # this method must be thread safe
     # start with the context frame parameters and adjust for the drift region
     frame_parameters = copy.deepcopy(self.__scan_frame_parameters)
     context_size = frame_parameters.size.to_float_size()
     drift_channel_id = self.__scan_hardware_source.drift_channel_id
     drift_region = self.__scan_hardware_source.drift_region
     drift_rotation = self.__scan_hardware_source.drift_rotation
     if drift_channel_id is not None and drift_region is not None:
         drift_channel_index = self.__scan_hardware_source.get_channel_index(
             drift_channel_id)
         assert drift_channel_index is not None
         frame_parameters.subscan_pixel_size = Geometry.IntSize(
             int(context_size.height * drift_region.height * 4),
             int(context_size.width * drift_region.width * 4))
         if frame_parameters.subscan_pixel_size[0] >= 8 or frame_parameters.subscan_pixel_size[1] >= 8:
             frame_parameters.subscan_fractional_size = Geometry.FloatSize(
                 drift_region.height, drift_region.width)
             frame_parameters.subscan_fractional_center = Geometry.FloatPoint(
                 drift_region.center.y, drift_region.center.x)
             frame_parameters.subscan_rotation = drift_rotation
             # attempt to keep drift area in roughly the same position by adding in the accumulated correction.
             drift_tracker = self.__scan_hardware_source.drift_tracker
             utc_time = utc_time or datetime.datetime.utcnow()
             delta_nm = drift_tracker.predict_drift(utc_time)
             frame_parameters.center_nm = frame_parameters.center_nm - delta_nm
             xdatas = self.__scan_hardware_source.record_immediate(
                 frame_parameters, [drift_channel_index])
             xdata0 = xdatas[0]
             if xdata0:
                 drift_tracker.submit_image(xdata0,
                                            drift_rotation,
                                            wait=True)
Example #9
        def add_line_profile(
                data_item: DataItem.DataItem,
                document_controller: DocumentController.DocumentController,
                display_panel_id: str,
                midpoint: float = 0.5,
                integration_width: float = 0.25) -> None:
            logging.debug("midpoint: {:.4f}".format(midpoint))
            logging.debug("width: {:.4f}".format(integration_width))

            # next, line profile through center of crop
            # please don't copy this bad example code!
            crop_region = Graphics.RectangleGraphic()
            crop_region.center = Geometry.FloatPoint(midpoint, 0.5)
            crop_region.size = Geometry.FloatSize(integration_width, 1)
            crop_region.is_bounds_constrained = True
            display_item = document_controller.document_model.get_display_item_for_data_item(
                data_item)
            assert display_item
            display_item.add_graphic(crop_region)
            display_data_item = display_item.data_item
            assert display_data_item
            eels_data_item = document_controller.document_model.get_projection_new(
                display_item, display_data_item, crop_region)
            if eels_data_item:
                eels_data_item.title = _("EELS Summed")
                eels_display_item = document_controller.document_model.get_display_item_for_data_item(
                    eels_data_item)
                assert eels_display_item
                document_controller.show_display_item(eels_display_item)
            else:
                eels_display_item = None

            workspace_controller = document_controller.workspace_controller
            if workspace_controller and eels_display_item:
                workspace_controller.display_display_item_in_display_panel(
                    eels_display_item, display_panel_id)
Example #10
    def read_partial(
        self, frame_number, pixels_to_skip
    ) -> typing.Tuple[typing.Sequence[dict], bool, bool, tuple, int, int]:
        """Read or continue reading a frame.

        The `frame_number` may be None, in which case a new frame should be read.

        The `frame_number` otherwise specifies which frame to continue reading.

        The `pixels_to_skip` specifies where to start reading the frame, if it is a continuation.

        Return values should be a list of dicts (one for each active channel) containing two keys: 'data' and
        'properties' (see below), followed by a boolean indicating whether the frame is complete, a boolean indicating
        whether the frame was bad, a tuple of the form (top, left), (height, width) indicating the valid sub-area
        of the data, the frame number, and the pixels to skip next time around if the frame is not complete.

        The 'data' keys in the list of dicts should contain an ndarray with the size of the full acquisition and each
        ndarray should be the same size. The 'properties' keys are dicts which must contain the frame parameters and
        a 'channel_id' indicating the index of the channel (may be an int or float).
        """

        if self.__frame is None:
            self.__start_next_frame()
        current_frame = self.__frame
        assert current_frame is not None
        frame_number = current_frame.frame_number

        frame_parameters = current_frame.frame_parameters
        size = Geometry.IntSize.make(
            frame_parameters.subscan_pixel_size if frame_parameters.subscan_pixel_size else frame_parameters.size)
        total_pixels = size.height * size.width
        time_slice = 0.005  # 5ms

        if current_frame.scan_data is None:
            scan_data = list()
            for channel in current_frame.channels:
                scan_data.append(
                    self.__instrument.get_scan_data(
                        current_frame.frame_parameters, channel))
            current_frame.scan_data = scan_data

        is_synchronized_scan = frame_parameters.external_clock_mode != 0
        if current_frame.data_count == 0 and is_synchronized_scan:
            self.__instrument.live_probe_position = Geometry.FloatPoint()

        target_count = 0
        while self.__is_scanning and target_count <= current_frame.data_count:
            if is_synchronized_scan:
                # set the probe position
                h, w = current_frame.scan_data[0].shape
                # calculate relative position within sub-area
                ry, rx = current_frame.data_count // w / h - 0.5, current_frame.data_count % w / w - 0.5
                # now translate to context
                ss = (Geometry.FloatSize.make(frame_parameters.subscan_fractional_size)
                      if frame_parameters.subscan_fractional_size else Geometry.FloatSize(h=1.0, w=1.0))
                oo = (Geometry.FloatPoint.make(frame_parameters.subscan_fractional_center) - Geometry.FloatPoint(y=0.5, x=0.5)
                      if frame_parameters.subscan_fractional_center else Geometry.FloatPoint())
                oo += Geometry.FloatSize(
                    h=frame_parameters.center_nm[0] / frame_parameters.fov_nm,
                    w=frame_parameters.center_nm[1] / frame_parameters.fov_nm)
                pt = Geometry.FloatPoint(y=ry * ss.height + oo.y,
                                         x=rx * ss.width + oo.x)
                pt = pt.rotate(frame_parameters.rotation_rad)
                if frame_parameters.subscan_rotation:
                    pt = pt.rotate(-frame_parameters.subscan_rotation, oo)
                # print(f"{x}, {y} = ({ry} - 0.5) * {ss.height} + {oo.y} / (ry - 0.5) * ss.height + oo.y")
                # >>> def f(s, c, x): return (x - 0.5) * s + c # |------<---c--->------------|
                self.__instrument.live_probe_position = pt + Geometry.FloatPoint(
                    y=0.5, x=0.5)
                # do a synchronized readout
                if current_frame.data_count % size.width == 0:
                    # throw away two flyback images at beginning of line
                    if not self.__is_scanning or not self.__instrument.wait_for_camera_frame(
                            frame_parameters.external_clock_wait_time_ms /
                            1000):
                        current_frame.bad = True
                        current_frame.complete = True
                    if not self.__is_scanning or not self.__instrument.wait_for_camera_frame(
                            frame_parameters.external_clock_wait_time_ms /
                            1000):
                        current_frame.bad = True
                        current_frame.complete = True
                if not self.__is_scanning or not self.__instrument.wait_for_camera_frame(
                        frame_parameters.external_clock_wait_time_ms / 1000):
                    current_frame.bad = True
                    current_frame.complete = True
                target_count = current_frame.data_count + 1
            else:
                pixels_remaining = total_pixels - current_frame.data_count
                pixel_wait = min(
                    pixels_remaining * frame_parameters.pixel_time_us / 1E6,
                    time_slice)
                time.sleep(pixel_wait)
                target_count = min(
                    int((time.time() - current_frame.start_time) /
                        (frame_parameters.pixel_time_us / 1E6)), total_pixels)

        if self.__is_scanning and target_count > current_frame.data_count:
            for channel_index, channel in enumerate(current_frame.channels):
                scan_data_flat = current_frame.scan_data[channel_index].reshape((total_pixels,))
                channel_data_flat = channel.data.reshape((total_pixels,))
                channel_data_flat[current_frame.data_count:target_count] = scan_data_flat[current_frame.data_count:target_count]
            current_frame.data_count = target_count
            current_frame.complete = current_frame.data_count == total_pixels
        else:
            assert not self.__is_scanning
            current_frame.data_count = total_pixels
            current_frame.complete = True

        data_elements = list()

        for channel in current_frame.channels:
            data_element = dict()
            data_element["data"] = channel.data
            properties = current_frame.frame_parameters.as_dict()
            properties["center_x_nm"] = current_frame.frame_parameters.center_nm[1]
            properties["center_y_nm"] = current_frame.frame_parameters.center_nm[0]
            properties["rotation_deg"] = math.degrees(
                current_frame.frame_parameters.rotation_rad)
            properties["channel_id"] = channel.channel_id
            data_element["properties"] = properties
            data_elements.append(data_element)

        current_rows_read = current_frame.data_count // size.width

        if current_frame.complete:
            sub_area = ((0, 0), size)
            pixels_to_skip = 0
            self.__frame = None
        else:
            sub_area = ((pixels_to_skip // size.width, 0),
                        (current_rows_read - pixels_to_skip // size.width,
                         size.width))
            pixels_to_skip = size.width * current_rows_read

        complete = current_frame.complete
        bad_frame = False

        if complete:
            if len(self.__buffer) > 0 and len(
                    self.__buffer[-1]) != len(data_elements):
                self.__buffer = list()
            self.__buffer.append(data_elements)
            while len(self.__buffer) > 100:
                del self.__buffer[0]
            self.__is_scanning = False

        return data_elements, complete, bad_frame, sub_area, frame_number, pixels_to_skip
Example #11
 def reset(self) -> None:
     self.__last_xdata = None
     self.__center_nm = Geometry.FloatSize()
     self.__last_offset_nm = Geometry.FloatSize()
Example #12
    def get_frame_data(
        self, readout_area: Geometry.IntRect, binning_shape: Geometry.IntSize,
        exposure_s: float, scan_context: stem_controller.ScanContext,
        parked_probe_position: typing.Optional[Geometry.FloatPoint]
    ) -> DataAndMetadata.DataAndMetadata:
        """
        Features at the probe position will add plasmons and edges in addition to a ZLP.

        There are two inputs to this model: the beam current and the T/l (thickness / mean free path).

        The sum of the spectrum data should add up to the beam current (using counts per electron and conversion from
        electrons to amps).

        The natural log of the ratio of the sum of the spectrum to the sum of the ZLP should equal thickness / mean free
        path.

        The strategy is to have low level routines for adding the shapes of the ZLP (gaussian normal) and plasmons and
        edges (power law multiplied by integrated gaussian normal) and then scaling these shapes such that they satisfy
        the conditions above.

        A complication of this is that the specified energy range may not include the ZLP. So two spectrums are built:
        the one for caller and the one for reference. The reference one is used for calculating the scaling of the ZLP
        and edges, which are then applied to the spectrum for the caller.

        If we define the following values:
            z = sum/integration of unscaled ZLP gaussian
            f = sum/integration of unscaled plasmons/edges
            P = target count such that P / counts_per_electron matches beam current
            T = thickness (nm)
            L = lambda (mean_free_path_nm)
            T/l = thickness / lambda (mean free path)
        then we can solve for two unknowns:
            A = scale of ZLP
            B = scale of plasmons/edges
        using the two equations:
            Az + Bf = P (beam current)
            ln(P / Az) = T/l => P / Az = exp(T/l) (thickness = natural log of ratio of total counts to ZLP counts)
        solving:
            A = P / exp(T/l) / z
            B = (P - Az) / f
        """

        # grab the probe position
        probe_position: typing.Optional[Geometry.FloatPoint] = Geometry.FloatPoint(0.5, 0.5)
        if self.instrument.is_blanked:
            probe_position = None
        elif self.instrument.probe_state == "scanning":
            probe_position = self.instrument.live_probe_position
        elif self.instrument.probe_state == "parked" and parked_probe_position is not None:
            probe_position = parked_probe_position
        probe_position = Geometry.FloatPoint.make(
            probe_position) if probe_position is not None else None

        # check if one of the arguments has changed since last call
        new_frame_settings = [
            readout_area, binning_shape, exposure_s,
            copy.deepcopy(scan_context), probe_position
        ]
        if new_frame_settings != self._last_frame_settings:
            self._needs_recalculation = True
        self._last_frame_settings = new_frame_settings

        if self._needs_recalculation or self.__cached_frame is None:
            data: numpy.typing.NDArray[numpy.float_] = numpy.zeros(
                tuple(self._sensor_dimensions), float)
            slit_attenuation = 10 if self.instrument.is_slit_in else 1
            intensity_calibration = Calibration.Calibration(units="counts")
            dimensional_calibrations = self.get_dimensional_calibrations(
                readout_area, binning_shape)

            # typical thickness over mean free path (T/l) will be 0.5
            mean_free_path_nm = 100  # nm. (lambda values from back of Edgerton)
            thickness_per_layer_nm = 30  # nm

            # this is the number of pixel counts expected if the ZLP is visible in vacuum for the given exposure
            # and beam current (in get_total_counts).
            target_pixel_count = self.get_total_counts(exposure_s) / data.shape[0]

            # grab the specific calibration for the energy direction and offset by ZLPoffset
            used_calibration = dimensional_calibrations[1]
            used_calibration.offset = typing.cast(
                "InstrumentDevice.Control",
                self.instrument.get_control("ZLPoffset")).local_value

            if scan_context.is_valid and probe_position is not None:

                # make a buffer for the spectrum
                spectrum: numpy.typing.NDArray[numpy.float_] = numpy.zeros(
                    (data.shape[1], ), float)

                # configure a calibration for the reference spectrum. then plot the ZLP on the reference data. sum it to
                # get the zlp_pixel_count and the zlp_scale. this is the value to multiply zlp data by to scale it so
                # that it will produce the target pixel count. since we will be storing the spectra in a 2d array,
                # divide by the height of that array so that when it is summed, the value comes out correctly.
                zlp0_calibration = Calibration.Calibration(
                    scale=used_calibration.scale, offset=-20)
                spectrum_ref: numpy.typing.NDArray[numpy.float_] = numpy.zeros(
                    (int(zlp0_calibration.convert_from_calibrated_value(-20 + 1000) -
                         zlp0_calibration.convert_from_calibrated_value(-20)),), float)
                plot_norm(spectrum_ref, 1.0,
                          Calibration.Calibration(scale=used_calibration.scale, offset=-20),
                          0, 0.5 / slit_attenuation)
                zlp_ref_pixel_count = float(numpy.sum(spectrum_ref))

                # build the spectrum and reference spectrum by adding the features. the data is unscaled.
                spectrum_ref = numpy.zeros(
                    (int(zlp0_calibration.convert_from_calibrated_value(-20 + 1000) -
                         zlp0_calibration.convert_from_calibrated_value(-20)),), float)
                offset_m = self.instrument.actual_offset_m  # stage position - beam shift + drift
                feature_layer_count = 0
                for index, feature in enumerate(self.instrument.sample.features):
                    scan_context_fov_size_nm = scan_context.fov_size_nm or Geometry.FloatSize()
                    scan_context_center_nm = scan_context.center_nm or Geometry.FloatPoint()
                    if feature.intersects(offset_m, scan_context_fov_size_nm,
                                          scan_context_center_nm,
                                          probe_position):
                        plot_spectrum(feature, spectrum, 1.0, used_calibration)
                        plot_spectrum(feature, spectrum_ref, 1.0,
                                      zlp0_calibration)
                        feature_layer_count += 1
                feature_pixel_count = max(
                    typing.cast(float, numpy.sum(spectrum_ref)), 0.01)

                # make the calculations for A, B (zlp_scale and feature_scale).
                thickness_factor = feature_layer_count * thickness_per_layer_nm / mean_free_path_nm
                zlp_scale = target_pixel_count / math.exp(thickness_factor) / zlp_ref_pixel_count
                feature_scale = (target_pixel_count - (target_pixel_count / math.exp(thickness_factor))) / feature_pixel_count
                # print(f"thickness_factor {thickness_factor}")

                # apply the scaling. spectrum holds the features at this point, but not the ZLP. just multiply by
                # feature_scale to make the feature part of the spectrum final. then plot the ZLP scaled by zlp_scale.
                spectrum *= feature_scale
                # print(f"sum {numpy.sum(spectrum) * data.shape[0]}")
                # print(f"zlp_ref_pixel_count {zlp_ref_pixel_count} feature_pixel_count {feature_pixel_count}")
                # print(f"zlp_scale {zlp_scale} feature_scale {feature_scale}")
                plot_norm(spectrum, zlp_scale, used_calibration, 0,
                          0.5 / slit_attenuation)
                # print(f"sum {numpy.sum(spectrum) * data.shape[0]}")
                # print(f"target_pixel_count {target_pixel_count}")

                # finally, store the spectrum into each row of the data
                data[:, ...] = spectrum

                # spectrum_pixel_count = float(numpy.sum(spectrum)) * data.shape[0]
                # print(f"z0 {zlp_ref_pixel_count * data.shape[0]} / {used_calibration.offset}")
                # print(f"beam current {self.instrument.beam_current * 1e12}pA")
                # print(f"current {spectrum_pixel_count / exposure_s / self.instrument.counts_per_electron / 6.242e18 * 1e12:#.2f}pA")
                # print(f"target {target_pixel_count}  actual {spectrum_pixel_count}")
                # print(f"s {spectrum_pixel_count} z {zlp_ref_pixel_count * zlp_scale * data.shape[0]}")
                # print(f"{math.log(spectrum_pixel_count / (zlp_ref_pixel_count * zlp_scale * data.shape[0]))} {thickness_factor}")

            data = self._get_binned_data(data, binning_shape)

            self.__cached_frame = DataAndMetadata.new_data_and_metadata(
                data.astype(numpy.float32),
                intensity_calibration=intensity_calibration,
                dimensional_calibrations=dimensional_calibrations)
            self.__data_scale = self.get_total_counts(exposure_s) / target_pixel_count / slit_attenuation / self._sensor_dimensions[0]
            self._needs_recalculation = False

        self.noise.poisson_level = self.__data_scale
        return self.noise.apply(self.__cached_frame)
Example #13
 def fov_size_nm(self) -> Geometry.FloatSize:
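     # fov_nm applies to the larger dimension of the scan; the smaller dimension is scaled down by the aspect ratio
     # (e.g. a 512 x 1024 scan with fov_nm=100 gives FloatSize(height=50.0, width=100.0), assuming aspect_ratio is width / height)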
     if self.size.aspect_ratio > 1.0:
         return Geometry.FloatSize(height=self.fov_nm / self.size.aspect_ratio, width=self.fov_nm)
     else:
         return Geometry.FloatSize(height=self.fov_nm, width=self.fov_nm * self.size.aspect_ratio)
Example #14
    def get_frame_data(self, readout_area: Geometry.IntRect, binning_shape: Geometry.IntSize, exposure_s: float, scan_context, parked_probe_position) -> DataAndMetadata.DataAndMetadata:
        # check if one of the arguments has changed since last call
        new_frame_settings = [readout_area, binning_shape, exposure_s, copy.deepcopy(scan_context)]
        if new_frame_settings != self._last_frame_settings:
            self._needs_recalculation = True
        if self.instrument.sample != self.__last_sample:
            self._needs_recalculation = True
        self._last_frame_settings = new_frame_settings

        if self._needs_recalculation or self.__cached_frame is None:
            #print("recalculating frame")
            height = readout_area.height
            width = readout_area.width
            offset_m = self.instrument.stage_position_m
            full_fov_nm = self.__stage_size_nm
            fov_size_nm = Geometry.FloatSize(full_fov_nm * height / self._sensor_dimensions.height, full_fov_nm * width / self._sensor_dimensions.width)
            center_nm = Geometry.FloatPoint(full_fov_nm * (readout_area.center.y / self._sensor_dimensions.height - 0.5), full_fov_nm * (readout_area.center.x / self._sensor_dimensions.width - 0.5))
            size = Geometry.IntSize(height, width)
            data = numpy.zeros((height, width), numpy.float32)
            # features will be positive values; thickness can be simulated by subtracting the features from the
            # vacuum value. the higher the vacuum value, the thinner (i.e. less contribution from features).
            thickness_param = 100
            if not self.instrument.is_blanked:
                self.instrument.sample.plot_features(data, offset_m, fov_size_nm, Geometry.FloatPoint(), center_nm, size)
                data = thickness_param - data
            data = self._get_binned_data(data, binning_shape)
            self.__last_sample = self.instrument.sample

            if not self.instrument.is_blanked:
                probe_position = Geometry.FloatPoint(0.5, 0.5)
                if self.instrument.probe_state == "scanning":
                    probe_position = self.instrument.live_probe_position
                elif self.instrument.probe_state == "parked" and parked_probe_position is not None:
                    probe_position = parked_probe_position

                scan_offset = Geometry.FloatPoint()
                if scan_context.is_valid and probe_position is not None:
                    scan_offset = Geometry.FloatPoint(
                        y=probe_position[0] * scan_context.fov_size_nm[0] - scan_context.fov_size_nm[0] / 2,
                        x=probe_position[1] * scan_context.fov_size_nm[1] - scan_context.fov_size_nm[1] / 2)
                    scan_offset = scan_offset*1e-9

                theta = self.__tv_pixel_angle * self._sensor_dimensions.height / 2  # half angle on camera
                aberrations = dict()
                aberrations["height"] = data.shape[0]
                aberrations["width"] = data.shape[1]
                aberrations["theta"] = theta
                aberrations["c0a"] = self.instrument.GetVal2D("beam_shift_m").x + scan_offset[1]
                aberrations["c0b"] = self.instrument.GetVal2D("beam_shift_m").y + scan_offset[0]
                aberrations["c10"] = self.instrument.GetVal("C10Control")
                aberrations["c12a"] = self.instrument.GetVal2D("C12Control").x
                aberrations["c12b"] = self.instrument.GetVal2D("C12Control").y
                aberrations["c21a"] = self.instrument.GetVal2D("C21Control").x
                aberrations["c21b"] = self.instrument.GetVal2D("C21Control").y
                aberrations["c23a"] = self.instrument.GetVal2D("C23Control").x
                aberrations["c23b"] = self.instrument.GetVal2D("C23Control").y
                aberrations["c30"] = self.instrument.GetVal("C30Control")
                aberrations["c32a"] = self.instrument.GetVal2D("C32Control").x
                aberrations["c32b"] = self.instrument.GetVal2D("C32Control").y
                aberrations["c34a"] = self.instrument.GetVal2D("C34Control").x
                aberrations["c34b"] = self.instrument.GetVal2D("C34Control").y
                data = self.__aberrations_controller.apply(aberrations, data)
                if self.instrument.GetVal("S_VOA") > 0:
                    self._draw_aperture(data, binning_shape)
                elif self.instrument.GetVal("S_MOA") > 0:
                    self._draw_aperture(data, binning_shape, enlarge_by=0.1)

            intensity_calibration = Calibration.Calibration(units="counts")
            dimensional_calibrations = self.get_dimensional_calibrations(readout_area, binning_shape)

            self.__cached_frame = DataAndMetadata.new_data_and_metadata(data.astype(numpy.float32), intensity_calibration=intensity_calibration, dimensional_calibrations=dimensional_calibrations)
            self.__data_scale = self.get_total_counts(exposure_s) / (data.shape[0] * data.shape[1] * thickness_param)
            self._needs_recalculation = False

        self.noise.poisson_level = self.__data_scale
        return self.noise.apply(self.__cached_frame * self.__data_scale)