Example #1
0
def align_zlp_xdata(src_xdata: DataAndMetadata.DataAndMetadata, progress_fn=None, method='com', roi: typing.Optional[API_1_0.Graphic]=None, ref_index: int=0) -> typing.Tuple[typing.Optional[DataAndMetadata.DataAndMetadata], typing.Optional[DataAndMetadata.DataAndMetadata]]:
    """Align the zero-loss peak (ZLP) of every spectrum in src_xdata to a reference spectrum.

    The ZLP position of each spectrum is estimated with the chosen method, and each spectrum
    is shifted (scipy.ndimage.shift) so that its peak lines up with the peak of the spectrum
    at ref_index.

    Args:
        src_xdata: input data; must have 1d datum (sequence/collection allowed) or be a plain
            2d image (treated as rows of spectra).
        progress_fn: optional callable receiving the current row index for progress reporting.
        method: 'com' (center of mass), 'fit' (spline fit) or 'max' (argmax, integer shifts).
        roi: optional graphic restricting the region used for peak finding; an interval
            graphic restricts the energy range, a rect graphic additionally selects the
            reference row via its top edge.
        ref_index: flattened index of the spectrum used as the alignment reference.

    Returns:
        Tuple of (aligned xdata, per-spectrum shift xdata). The shift xdata is None for a
        single spectrum. Returns (None, None) if src_xdata is not suitable.

    Raises:
        ValueError: if method is not one of 'com', 'fit', 'max'.
    """
    # check to make sure it is suitable for this algorithm
    # if (src_xdata.is_datum_1d and (src_xdata.is_sequence or src_xdata.is_collection)) or (src_xdata.is_datum_2d and not (src_xdata.is_sequence or src_xdata.is_collection)):
    if src_xdata.is_datum_1d or (src_xdata.is_datum_2d and not (src_xdata.is_sequence or src_xdata.is_collection)):
        # get the numpy array and create the destination data
        src_data = src_xdata.data
        assert src_data is not None

        d_rank = 1
        src_shape = tuple(src_xdata.data_shape)
        d_shape = src_shape[-d_rank:]

        if roi and roi.graphic_type == "interval-graphic":
            data_slice = slice(int(roi.start * d_shape[0]), int(roi.end * d_shape[0]))
        elif roi and roi.graphic_type == "rect-graphic":
            ref_index = int(roi.bounds[0][0] * src_shape[0])
            data_slice = slice(int(roi.bounds[0][1] * d_shape[0]), int((roi.bounds[0][1] + roi.bounds[1][1]) * d_shape[0]))
        else:
            data_slice = slice(0, None)

        # flatten all non-datum dimensions so spectra can be processed linearly
        flat_src_data = numpy.reshape(src_data, (-1,) + d_shape)
        flat_dst_data = numpy.zeros_like(flat_src_data)
        flat_pos_data = numpy.zeros(flat_src_data.shape[0], dtype=numpy.float32)

        if method == "com":
            get_position_fn = ZLP_Analysis.estimate_zlp_amplitude_position_width_com
            interpolation_order = 1
        elif method == "fit":
            get_position_fn = ZLP_Analysis.estimate_zlp_amplitude_position_width_fit_spline
            interpolation_order = 1
        elif method == "max":
            # argmax yields integer positions, so nearest-neighbor shifting suffices
            get_position_fn = lambda data: (None, numpy.argmax(data), None)
            interpolation_order = 0
        else:
            raise ValueError(f"Method {method} is not supported. Allowed options are 'com', 'fit' and 'max'.")

        # use this as the reference position. all other spectra will be aligned to this one.
        ref_pos = get_position_fn(flat_src_data[ref_index, data_slice])[1]
        # put the reference spectrum in the result unchanged
        flat_dst_data[ref_index] = flat_src_data[ref_index]
        # loop over all non-datum dimensions linearly
        for i in range(len(flat_src_data)):
            if i == ref_index:
                continue
            # the algorithm in this early version is to find the max value
            mx_pos = get_position_fn(flat_src_data[i, data_slice])[1]
            # fallback to simple max if get_position_fn failed.
            # bug fix: the original `mx_pos is numpy.nan` compared object identity and
            # missed NaN values produced at runtime; use numpy.isnan (with a None guard).
            if mx_pos is None or numpy.isnan(mx_pos):
                mx_pos = numpy.argmax(flat_src_data[i, data_slice])
            # determine the offset and apply it
            offset = ref_pos - mx_pos
            flat_dst_data[i] = scipy.ndimage.shift(flat_src_data[i], offset, order=interpolation_order)
            flat_pos_data[i] = -offset
            # every row, report progress (will also work for a sequence or 1d collection
            # because there we have only 1 row anyways)
            if i % src_shape[1] == 0 and callable(progress_fn):
                progress_fn(i//src_shape[1])

        # recalibrate the energy axis so that the reference peak sits at 0 eV
        dimensional_calibrations = copy.deepcopy(src_xdata.dimensional_calibrations)
        energy_calibration = dimensional_calibrations[-1]
        energy_calibration.offset = -(ref_pos + 0.5) * energy_calibration.scale
        dimensional_calibrations = list(dimensional_calibrations[:-1]) + [energy_calibration]
        # the shift map shares scale/units with the energy axis but has no offset
        shift_calibration = copy.copy(energy_calibration)
        shift_calibration.offset = 0

        # dst_data is complete. construct xdata with correct calibration and data descriptor.
        data_descriptor = DataAndMetadata.DataDescriptor(src_xdata.is_sequence, src_xdata.collection_dimension_count, src_xdata.datum_dimension_count)
        shift_xdata = None
        if flat_pos_data.size > 1:
            shift_xdata = DataAndMetadata.new_data_and_metadata(flat_pos_data.reshape(src_shape[:-d_rank]), shift_calibration, dimensional_calibrations[:-d_rank])
        return (DataAndMetadata.new_data_and_metadata(flat_dst_data.reshape(src_shape), src_xdata.intensity_calibration, dimensional_calibrations, data_descriptor=data_descriptor),
                shift_xdata)

    return None, None
Example #2
0
    def get_frame_data(self, readout_area: Geometry.IntRect, binning_shape: Geometry.IntSize, exposure_s: float, scan_context, parked_probe_position) -> DataAndMetadata.DataAndMetadata:
        """Return a simulated camera frame for the requested readout area.

        The synthesized frame (sample features, beam shift/aberrations, apertures) is
        cached; it is recomputed only when the call arguments, the sample, or the scan
        context change. Poisson noise scaled from the exposure is applied on every call,
        so repeated calls with identical settings return noisy variations of the same
        underlying frame.
        """
        # check if one of the arguments has changed since last call
        new_frame_settings = [readout_area, binning_shape, exposure_s, copy.deepcopy(scan_context)]
        if new_frame_settings != self._last_frame_settings:
            self._needs_recalculation = True
        # a sample change also invalidates the cached frame
        if self.instrument.sample != self.__last_sample:
            self._needs_recalculation = True
        self._last_frame_settings = new_frame_settings

        if self._needs_recalculation or self.__cached_frame is None:
            #print("recalculating frame")
            height = readout_area.height
            width = readout_area.width
            offset_m = self.instrument.stage_position_m
            full_fov_nm = self.__stage_size_nm
            # field of view and center of the readout area in nm, scaled from the full sensor
            fov_size_nm = Geometry.FloatSize(full_fov_nm * height / self._sensor_dimensions.height, full_fov_nm * width / self._sensor_dimensions.width)
            center_nm = Geometry.FloatPoint(full_fov_nm * (readout_area.center.y / self._sensor_dimensions.height- 0.5), full_fov_nm * (readout_area.center.x / self._sensor_dimensions.width - 0.5))
            size = Geometry.IntSize(height, width)
            data = numpy.zeros((height, width), numpy.float32)
            # features will be positive values; thickness can be simulated by subtracting the features from the
            # vacuum value. the higher the vacuum value, the thinner (i.e. less contribution from features).
            thickness_param = 100
            if not self.instrument.is_blanked:
                self.instrument.sample.plot_features(data, offset_m, fov_size_nm, Geometry.FloatPoint(), center_nm, size)
                data = thickness_param - data
            data = self._get_binned_data(data, binning_shape)
            self.__last_sample = self.instrument.sample

            if not self.instrument.is_blanked:
                # choose the probe position: live position while scanning, parked position
                # when parked (if provided), otherwise the center of the field of view
                probe_position = Geometry.FloatPoint(0.5, 0.5)
                if self.instrument.probe_state == "scanning":
                    probe_position = self.instrument.live_probe_position
                elif self.instrument.probe_state == "parked" and parked_probe_position is not None:
                    probe_position = parked_probe_position

                # convert the normalized probe position into a scan offset in meters,
                # measured from the center of the scan field of view
                scan_offset = Geometry.FloatPoint()
                if scan_context.is_valid and probe_position is not None:
                    scan_offset = Geometry.FloatPoint(
                        y=probe_position[0] * scan_context.fov_size_nm[0] - scan_context.fov_size_nm[0] / 2,
                        x=probe_position[1] * scan_context.fov_size_nm[1] - scan_context.fov_size_nm[1] / 2)
                    scan_offset = scan_offset*1e-9

                theta = self.__tv_pixel_angle * self._sensor_dimensions.height / 2  # half angle on camera
                # assemble the aberration coefficients from the instrument controls;
                # c0a/c0b fold the scan offset into the beam shift
                aberrations = dict()
                aberrations["height"] = data.shape[0]
                aberrations["width"] = data.shape[1]
                aberrations["theta"] = theta
                aberrations["c0a"] = self.instrument.GetVal2D("beam_shift_m").x + scan_offset[1]
                aberrations["c0b"] = self.instrument.GetVal2D("beam_shift_m").y + scan_offset[0]
                aberrations["c10"] = self.instrument.GetVal("C10Control")
                aberrations["c12a"] = self.instrument.GetVal2D("C12Control").x
                aberrations["c12b"] = self.instrument.GetVal2D("C12Control").y
                aberrations["c21a"] = self.instrument.GetVal2D("C21Control").x
                aberrations["c21b"] = self.instrument.GetVal2D("C21Control").y
                aberrations["c23a"] = self.instrument.GetVal2D("C23Control").x
                aberrations["c23b"] = self.instrument.GetVal2D("C23Control").y
                aberrations["c30"] = self.instrument.GetVal("C30Control")
                aberrations["c32a"] = self.instrument.GetVal2D("C32Control").x
                aberrations["c32b"] = self.instrument.GetVal2D("C32Control").y
                aberrations["c34a"] = self.instrument.GetVal2D("C34Control").x
                aberrations["c34b"] = self.instrument.GetVal2D("C34Control").y
                data = self.__aberrations_controller.apply(aberrations, data)
                # draw apertures when enabled; the MOA aperture is drawn slightly enlarged
                if self.instrument.GetVal("S_VOA") > 0:
                    self._draw_aperture(data, binning_shape)
                elif self.instrument.GetVal("S_MOA") > 0:
                    self._draw_aperture(data, binning_shape, enlarge_by=0.1)

            intensity_calibration = Calibration.Calibration(units="counts")
            dimensional_calibrations = self.get_dimensional_calibrations(readout_area, binning_shape)

            self.__cached_frame = DataAndMetadata.new_data_and_metadata(data.astype(numpy.float32), intensity_calibration=intensity_calibration, dimensional_calibrations=dimensional_calibrations)
            # scale so the total counts match the requested exposure for a full-thickness frame
            self.__data_scale = self.get_total_counts(exposure_s) / (data.shape[0] * data.shape[1] * thickness_param)
            self._needs_recalculation = False

        # apply Poisson noise on every call so successive frames differ realistically
        self.noise.poisson_level = self.__data_scale
        return self.noise.apply(self.__cached_frame * self.__data_scale)
Example #3
0
        def acquire_series(
            number_frames: int,
            energy_offset: float,
            dark_ref_enabled: bool,
            dark_ref_data: typing.Optional[_NDArray],
            task_object: typing.Optional[Task.TaskContextManager] = None
        ) -> DataItem.DataItem:
            """Acquire a stack of camera frames and return it as a calibrated data item.

            Grabs number_frames frames from the camera, optionally subtracts a dark
            reference (either the provided dark_ref_data or a freshly acquired one),
            blanks the beam afterwards and restores the original energy adjust value.
            The result is a sequence data item (frame axis + 2d frames).
            """
            logging.info("Starting image acquisition.")

            # grab one frame to get image size
            first_xdatas = camera.get_next_xdatas_to_start()
            first_xdata = first_xdatas[0] if first_xdatas else None
            first_data = first_xdata.data if first_xdata else None
            assert first_xdata is not None
            assert first_data is not None

            # Initialize an empty stack to fill with acquired data
            image_stack_data: numpy.typing.NDArray[numpy.float_] = numpy.empty(
                (number_frames, first_data.shape[0], first_data.shape[1]),
                dtype=float)

            # remember the current energy adjust value so it can be restored at the end
            reference_energy = stem_controller.GetVal(energy_adjust_control)

            # loop through the frames and fill in the empty stack from the
            # camera
            for frame_index in range(number_frames):
                # NOTE(review): applying the offset only when energy_offset == 0.0 looks
                # inverted — confirm whether this condition should be `!= 0.`
                if energy_offset == 0.:
                    set_offset_energy(energy_offset, 1)
                # use next frame to start to make sure we're getting a
                # frame with the new energy offset
                if frame_index == 0:
                    xdatas = camera.get_next_xdatas_to_start()
                    xdata0 = xdatas[0] if xdatas else None
                    data = xdata0.data if xdata0 else None
                    assert data is not None
                    image_stack_data[frame_index] = data
                else:
                    # grab the following frames
                    xdatas = camera.get_next_xdatas_to_finish()
                    xdata0 = xdatas[0] if xdatas else None
                    data = xdata0.data if xdata0 else None
                    assert data is not None
                    image_stack_data[frame_index] = data
                if task_object is not None:
                    # Update the task panel with the progress
                    task_object.update_progress(
                        _("Grabbing EELS data frame {}.").format(frame_index +
                                                                 1),
                        (frame_index + 1, number_frames), None)

            # Blank the beam
            stem_controller.SetValWait(blank_control, 1.0, 200)
            # load dark ref file
            if dark_ref_enabled:
                # User desires a dark reference to be applied
                dark_sum: typing.Optional[_NDArray] = None
                if dark_ref_data is not None:
                    # User has provided a valid dark reference file
                    if task_object is not None:
                        task_object.update_progress(
                            _("Applying dark reference"), None)
                    dark_sum = dark_ref_data
                else:
                    # User has not provided a valid dark reference, so a
                    # dark reference will be acquired
                    dark_sum = acquire_dark(number_frames, sleep_time,
                                            task_object)
                # Apply dark reference data to the image stack
                # (dark_sum is a sum over number_frames frames, hence the division)
                if dark_sum is not None:
                    image_stack_data -= dark_sum / number_frames

            # restore the original energy adjust value
            stem_controller.SetVal(energy_adjust_control, reference_energy)

            dimension_calibration0 = first_xdata.dimensional_calibrations[0]
            dimension_calibration1 = first_xdata.dimensional_calibrations[1]
            # TODO: replace frame calibration with acquisition time
            # (this is effectively chronospectroscopy before the sum)
            sequence_calibration = Calibration.Calibration(units="frame")
            # numpy zero array is dummy data
            image_stack_data_item = DataItem.DataItem(numpy.zeros((8, 8)))
            # Insert acquired data into a calibrated image stack
            image_stack_data_item.set_xdata(
                DataAndMetadata.new_data_and_metadata(
                    image_stack_data,
                    dimensional_calibrations=[
                        sequence_calibration, dimension_calibration0,
                        dimension_calibration1
                    ],
                    data_descriptor=DataAndMetadata.DataDescriptor(True, 0,
                                                                   2)))

            return image_stack_data_item
 def execute(self, eels_spectrum_data_item, background_model,
             fit_interval_graphics, **kwargs) -> None:
     """Fit and subtract a background from an EELS spectrum.

     Builds calibrated fit intervals from the interval graphics, looks up the
     background model component matching the background_model entity, and stores
     the fitted background and the background-subtracted spectrum on self for
     later retrieval. Falls back to a zero background / unchanged spectrum if no
     matching component produces a result.
     """
     try:
         spectrum_xdata = eels_spectrum_data_item.xdata
         assert spectrum_xdata.is_datum_1d
         assert spectrum_xdata.datum_dimensional_calibrations[
             0].units == "eV"
         eels_spectrum_xdata = spectrum_xdata
         # fit_interval_graphics.interval returns normalized coordinates. create calibrated intervals.
         fit_intervals = list()
         for fit_interval_graphic in fit_interval_graphics:
             fit_interval = Calibration.CalibratedInterval(
                 Calibration.Coordinate(
                     Calibration.CoordinateType.NORMALIZED,
                     fit_interval_graphic.interval[0]),
                 Calibration.Coordinate(
                     Calibration.CoordinateType.NORMALIZED,
                     fit_interval_graphic.interval[1]))
             fit_intervals.append(fit_interval)
         fit_minimum = min(
             [fit_interval.start.value for fit_interval in fit_intervals])
         # the signal region spans from the leftmost fit interval to the end of the spectrum
         signal_interval = Calibration.CalibratedInterval(
             Calibration.Coordinate(Calibration.CoordinateType.NORMALIZED,
                                    fit_minimum),
             Calibration.Coordinate(Calibration.CoordinateType.NORMALIZED,
                                    1.0))
         reference_frame = Calibration.ReferenceFrameAxis(
             eels_spectrum_xdata.datum_dimensional_calibrations[0],
             eels_spectrum_xdata.datum_dimension_shape[0])
         signal_xdata = Core.get_calibrated_interval_slice(
             eels_spectrum_xdata, reference_frame, signal_interval)
         background_xdata = None
         subtracted_xdata = None
         # find the registered background model component matching the entity id
         if background_model._data_structure.entity:
             entity_id = background_model._data_structure.entity.entity_type.entity_id
             for component in Registry.get_components_by_type(
                     "background-model"):
                 if entity_id == component.background_model_id:
                     fit_result = component.fit_background(
                         spectrum_xdata=spectrum_xdata,
                         fit_intervals=fit_intervals,
                         background_interval=signal_interval)
                     background_xdata = fit_result["background_model"]
                     # use 'or' to avoid doing subtraction if subtracted_spectrum already present
                     subtracted_xdata = fit_result.get(
                         "subtracted_spectrum",
                         None) or Core.calibrated_subtract_spectrum(
                             spectrum_xdata, background_xdata)
         # fall back to a zero background over the signal region
         if background_xdata is None:
             background_xdata = DataAndMetadata.new_data_and_metadata(
                 numpy.zeros_like(signal_xdata.data),
                 intensity_calibration=signal_xdata.intensity_calibration,
                 dimensional_calibrations=signal_xdata.
                 dimensional_calibrations)
         # fall back to the unchanged signal when no subtraction was performed
         if subtracted_xdata is None:
             subtracted_xdata = DataAndMetadata.new_data_and_metadata(
                 signal_xdata.data,
                 intensity_calibration=signal_xdata.intensity_calibration,
                 dimensional_calibrations=signal_xdata.
                 dimensional_calibrations)
         self.__background_xdata = background_xdata
         self.__subtracted_xdata = subtracted_xdata
     except Exception as e:
         # log the full traceback before re-raising so failures surface in the console
         import traceback
         print(traceback.format_exc())
         print(e)
         raise
Example #5
0
def load_image(file) -> DataAndMetadata.DataAndMetadata:
    """
    Loads the image from the file-like object or string file.
    If file is a string, the file is opened and then read.
    Returns calibrated xdata for our best guess of the most important image
    in the file (the last image in the DM image list).
    """
    # bug fix: the original condition tested `isinstance(file, str)` twice
    # (likely a leftover from a py2 str/unicode check); a single test suffices.
    if isinstance(file, str):
        with open(file, "rb") as f:
            return load_image(f)
    dmtag = parse_dm3.parse_dm_header(file)
    dmtag = fix_strings(dmtag)
    # display_keys(dmtag)
    # take the last image in the image list; DM typically stores the primary image last
    img_index = -1
    image_tags = dmtag['ImageList'][img_index]
    data = imagedatadict_to_ndarray(image_tags['ImageData'])
    # collect (offset, scale, units) per dimension; DM origins are negated and scaled
    calibrations = []
    calibration_tags = image_tags['ImageData'].get('Calibrations', dict())
    for dimension in calibration_tags.get('Dimension', list()):
        origin, scale, units = dimension.get('Origin', 0.0), dimension.get(
            'Scale', 1.0), dimension.get('Units', str())
        calibrations.append((-origin * scale, scale, units))
    # DM stores dimensions in reverse order relative to numpy
    calibrations = tuple(reversed(calibrations))
    # determine the data descriptor (sequence/collection/datum structure) from the
    # data shape, dtype and the DM metadata format hints; uint8 data is treated as
    # RGB(A) with the last axis being the color components
    if len(data.shape) == 3 and data.dtype != numpy.uint8:
        if image_tags['ImageTags'].get('Meta Data', dict()).get(
                "Format", str()).lower() in ("spectrum", "spectrum image"):
            if data.shape[1] == 1:
                # degenerate spectrum image: squeeze to a 1d collection of spectra
                data = numpy.squeeze(data, 1)
                data = numpy.moveaxis(data, 0, 1)
                data_descriptor = DataAndMetadata.DataDescriptor(False, 1, 1)
                calibrations = (calibrations[2], calibrations[0])
            else:
                # spectrum image: move the energy axis to the end
                data = numpy.moveaxis(data, 0, 2)
                data_descriptor = DataAndMetadata.DataDescriptor(False, 2, 1)
                calibrations = tuple(calibrations[1:]) + (calibrations[0], )
        else:
            data_descriptor = DataAndMetadata.DataDescriptor(False, 1, 2)
    elif len(data.shape) == 4 and data.dtype != numpy.uint8:
        # data = numpy.moveaxis(data, 0, 2)
        data_descriptor = DataAndMetadata.DataDescriptor(False, 2, 2)
    elif data.dtype == numpy.uint8:
        data_descriptor = DataAndMetadata.DataDescriptor(
            False, 0, len(data.shape[:-1]))
    else:
        data_descriptor = DataAndMetadata.DataDescriptor(
            False, 0, len(data.shape))
    # intensity calibration follows the same (-origin * scale, scale, units) convention
    brightness = calibration_tags.get('Brightness', dict())
    origin, scale, units = brightness.get('Origin', 0.0), brightness.get(
        'Scale', 1.0), brightness.get('Units', str())
    intensity = -origin * scale, scale, units
    timestamp = None
    timezone = None
    timezone_offset = None
    title = image_tags.get('Name')
    properties = dict()
    if 'ImageTags' in image_tags:
        voltage = image_tags['ImageTags'].get('ImageScanned',
                                              dict()).get('EHT', dict())
        if voltage:
            properties.setdefault("hardware_source", dict())["autostem"] = {
                "high_tension_v": float(voltage)
            }
        dm_metadata_signal = image_tags['ImageTags'].get('Meta Data',
                                                         dict()).get('Signal')
        if dm_metadata_signal and dm_metadata_signal.lower() == "eels":
            properties.setdefault("hardware_source",
                                  dict())["signal_type"] = dm_metadata_signal
        # spectrum-formatted data: all but the last dimension become collection dims
        if image_tags['ImageTags'].get('Meta Data', dict()).get(
                "Format", str()).lower() in ("spectrum", "spectrum image"):
            data_descriptor.collection_dimension_count += data_descriptor.datum_dimension_count - 1
            data_descriptor.datum_dimension_count = 1
        # a sequence flag converts the first collection dimension into a sequence dimension
        if image_tags['ImageTags'].get('Meta Data', dict()).get(
                "IsSequence",
                False) and data_descriptor.collection_dimension_count > 0:
            data_descriptor.is_sequence = True
            data_descriptor.collection_dimension_count -= 1
        timestamp_str = image_tags['ImageTags'].get("Timestamp")
        if timestamp_str:
            timestamp = get_datetime_from_timestamp_str(timestamp_str)
        timezone = image_tags['ImageTags'].get("Timezone")
        timezone_offset = image_tags['ImageTags'].get("TimezoneOffset")
        # to avoid having duplicate copies in Swift, get rid of these tags
        image_tags['ImageTags'].pop("Timestamp", None)
        image_tags['ImageTags'].pop("Timezone", None)
        image_tags['ImageTags'].pop("TimezoneOffset", None)
        # put the image tags into properties
        properties.update(image_tags['ImageTags'])
    dimensional_calibrations = [
        Calibration.Calibration(c[0], c[1], c[2]) for c in calibrations
    ]
    # pad with empty calibrations until the count matches the descriptor
    while len(dimensional_calibrations
              ) < data_descriptor.expected_dimension_count:
        dimensional_calibrations.append(Calibration.Calibration())
    intensity_calibration = Calibration.Calibration(intensity[0], intensity[1],
                                                    intensity[2])
    return DataAndMetadata.new_data_and_metadata(
        data,
        data_descriptor=data_descriptor,
        dimensional_calibrations=dimensional_calibrations,
        intensity_calibration=intensity_calibration,
        metadata=properties,
        timestamp=timestamp,
        timezone=timezone,
        timezone_offset=timezone_offset)
 def __fit_background(
     self, spectrum_xdata: DataAndMetadata.DataAndMetadata,
     fit_intervals: typing.Sequence[Calibration.CalibratedInterval],
     background_interval: Calibration.CalibratedInterval
 ) -> DataAndMetadata.DataAndMetadata:
     """Fit a background to the spectrum over fit_intervals and evaluate it over background_interval.

     Concatenates the calibrated x/y samples of all fit intervals, performs the
     model fit (self._perform_fit / self._perform_fits), and returns the fitted
     background evaluated over the background interval as calibrated xdata. For
     navigable data (spectrum images) the fit is performed per-spectrum.
     """
     reference_frame = Calibration.ReferenceFrameAxis(
         spectrum_xdata.datum_dimensional_calibrations[0],
         spectrum_xdata.datum_dimension_shape[0])
     # fit polynomial to the data
     xs = numpy.concatenate([
         Core.get_calibrated_interval_domain(reference_frame, fit_interval)
         for fit_interval in fit_intervals
     ])
     if len(fit_intervals) > 1:
         ys = numpy.concatenate([
             Core.get_calibrated_interval_slice(spectrum_xdata,
                                                reference_frame,
                                                fit_interval).data
             for fit_interval in fit_intervals
         ])
     else:
         ys = Core.get_calibrated_interval_slice(spectrum_xdata,
                                                 reference_frame,
                                                 fit_intervals[0]).data
     # generate background model data from the series
     n = reference_frame.convert_to_pixel(
         background_interval.end
     ).int_value - reference_frame.convert_to_pixel(
         background_interval.start).int_value
     interval_start = reference_frame.convert_to_calibrated(
         background_interval.start).value
     interval_end = reference_frame.convert_to_calibrated(
         background_interval.end).value
     interval_end -= (interval_end - interval_start
                      ) / n  # n samples at the left edges of each pixel
     calibration = copy.deepcopy(
         spectrum_xdata.datum_dimensional_calibrations[0])
     calibration.offset = reference_frame.convert_to_calibrated(
         background_interval.start).value
     fs = numpy.linspace(interval_start, interval_end, n)
     if spectrum_xdata.is_navigable:
         calibrations = list(
             copy.deepcopy(
                 spectrum_xdata.navigation_dimensional_calibrations)) + [
                     calibration
                 ]
         # flatten the navigation dimensions so the fits can run over a 2d array.
         # fix: numpy.product is deprecated and removed in NumPy 2.0; use numpy.prod.
         yss = numpy.reshape(ys, (numpy.prod(ys.shape[:-1]), ) +
                             (ys.shape[-1], ))
         fit_data = self._perform_fits(xs, yss, fs)
         data_descriptor = DataAndMetadata.DataDescriptor(
             False, spectrum_xdata.navigation_dimension_count,
             spectrum_xdata.datum_dimension_count)
         background_xdata = DataAndMetadata.new_data_and_metadata(
             numpy.reshape(fit_data, ys.shape[:-1] + (n, )),
             data_descriptor=data_descriptor,
             dimensional_calibrations=calibrations,
             intensity_calibration=spectrum_xdata.intensity_calibration)
     else:
         # single spectrum: one fit over the concatenated interval samples
         poly_data = self._perform_fit(xs, ys, fs)
         background_xdata = DataAndMetadata.new_data_and_metadata(
             poly_data,
             dimensional_calibrations=[calibration],
             intensity_calibration=spectrum_xdata.intensity_calibration)
     return background_xdata
    async def grab(self, document_controller, hardware_source, do_acquire):
        """Acquire and/or grab buffered frames and create data items for the recording.

        Optionally runs an acquisition (do_acquire), then grabs the last frame_count
        frame groups from the hardware source buffer, stacks them into sequence xdata
        per channel, and appends resulting data items to the document. Restarts
        playback afterwards if the source was playing when called.
        """
        # this is an async method meaning that it will execute until it calls await, at which time
        # it will let other parts of the software run until the awaited function finishes. in this
        # case, waiting for acquired data and grabbing the last frames are run in a thread.

        assert document_controller
        assert hardware_source

        event_loop = document_controller.event_loop

        self.cancel_event.clear()

        self.state.value = "running"
        self.progress_model.value = 0
        frame_count = self.frame_count_model.value
        was_playing = hardware_source.is_playing

        # single-element list so the worker threads can report failure to this scope
        success_ref = [True]

        xdata_group_list = list()

        def exec_acquire():
            # this will execute in a thread; the enclosing async routine will continue when it finishes
            try:
                start_time = time.time()
                max_wait_time = max(hardware_source.get_current_frame_time() * 1.5, 3)
                # wait for playback to actually start, bounded by max_wait_time
                while not hardware_source.is_playing:
                    if time.time() - start_time > max_wait_time:
                        success_ref[0] = False
                        return
                    time.sleep(0.01)
                hardware_source.get_next_xdatas_to_start(max_wait_time * 2)  # wait for frame + next frame
                for i in range(frame_count):
                    self.progress_model.value = int(100 * i / frame_count)
                    if self.cancel_event.is_set():
                        success_ref[0] = False
                        break
                    hardware_source.get_next_xdatas_to_finish(max_wait_time * 2)
            except Exception as e:
                import traceback
                traceback.print_exc()
                success_ref[0] = False

        if do_acquire:
            print("AR: start playing")
            hardware_source.start_playing()
            print("AR: wait for acquire")
            await event_loop.run_in_executor(None, exec_acquire)
            print("AR: acquire finished")

        def exec_grab():
            # this will execute in a thread; the enclosing async routine will continue when it finishes
            try:
                start_time = time.time()
                max_wait_time = max(hardware_source.get_current_frame_time() * 1.5, 3)
                # wait for playback to stop before reading the buffer, bounded by max_wait_time
                while hardware_source.is_playing:
                    if time.time() - start_time > max_wait_time:
                        success_ref[0] = False
                        return
                    time.sleep(0.01)
                data_element_groups = hardware_source.get_buffer_data(-frame_count, frame_count)
                for data_element_group in data_element_groups:
                    if self.cancel_event.is_set():
                        success_ref[0] = False
                        break
                    # convert each channel's data element to xdata; one group per frame
                    xdata_group = list()
                    for data_element in data_element_group:
                        xdata = ImportExportManager.convert_data_element_to_data_and_metadata(data_element)
                        xdata_group.append(xdata)
                    xdata_group_list.append(xdata_group)
                self.progress_model.value = 100
            except Exception as e:
                import traceback
                traceback.print_exc()
                success_ref[0] = False

        if success_ref[0]:
            print("AR: stop playing")
            hardware_source.stop_playing()
            print("AR: grabbing data")
            await event_loop.run_in_executor(None, exec_grab)
            print("AR: grab finished")

        xdata_group = None

        if success_ref[0]:
            if len(xdata_group_list) > 1:
                print("AR: making xdata")
                # count groups matching the shape/dtype of the last (exemplar) group;
                # only those are stacked into the sequence.
                # NOTE(review): this counts matching groups anywhere in the list, but
                # the slice below assumes the matches are contiguous at the end — confirm.
                valid_count = 0
                examplar_xdata_group = xdata_group_list[-1]
                shapes = [xdata.data.shape for xdata in examplar_xdata_group]
                dtypes = [xdata.data.dtype for xdata in examplar_xdata_group]
                for xdata_group in reversed(xdata_group_list):
                    shapes_i = [xdata.data.shape for xdata in xdata_group]
                    dtypes_i = [xdata.data.dtype for xdata in xdata_group]
                    if shapes_i == shapes and dtypes_i == dtypes:
                        valid_count += 1
                xdata_group = list()
                for i, xdata in enumerate(examplar_xdata_group):
                    # build sequence xdata per channel: prepend a frame axis calibration
                    # and mark the descriptor as a sequence
                    intensity_calibration = xdata.intensity_calibration
                    dimensional_calibrations = [Calibration.Calibration()] + list(xdata.dimensional_calibrations)
                    data_descriptor = DataAndMetadata.DataDescriptor(True,
                                                                     xdata.data_descriptor.collection_dimension_count,
                                                                     xdata.data_descriptor.datum_dimension_count)
                    data = numpy.vstack(list(xdata_group[i].data for xdata_group in xdata_group_list[-valid_count:])).reshape(valid_count, *shapes[i])
                    xdata = DataAndMetadata.new_data_and_metadata(data,
                                                                  intensity_calibration=intensity_calibration,
                                                                  dimensional_calibrations=dimensional_calibrations,
                                                                  data_descriptor=data_descriptor,
                                                                  metadata=xdata.metadata)
                    xdata_group.append(xdata)
            elif len(xdata_group_list) == 1:
                xdata_group = xdata_group_list[0]

        if xdata_group:
            print("AR: making data item")
            # create one data item per channel and show it in the document
            for xdata in xdata_group:
                data_item = DataItem.DataItem(large_format=True)
                data_item.ensure_data_source()
                data_item.set_xdata(xdata)
                channel_name = xdata.metadata.get("hardware_source", dict()).get("channel_name")
                channel_ext = (" (" + channel_name + ")") if channel_name else ""
                data_item.title = _("Recording of ") + hardware_source.display_name + channel_ext
                document_controller.document_model.append_data_item(data_item)
                display_item = document_controller.document_model.get_display_item_for_data_item(data_item)
                document_controller.show_display_item(display_item)

        # restore playback state if the source was playing when we started
        if was_playing:
            print("AR: restarting")
            hardware_source.start_playing()
        self.state.value = "idle"
        self.progress_model.value = 0
        print("AR: done")
Example #8
0
def convert_data_element_to_data_and_metadata_1(
        data_element) -> DataAndMetadata.DataAndMetadata:
    """Convert a data element to xdata. No data copying occurs.

    The data element can have the following keys:
        data (required)
        is_sequence, collection_dimension_count, datum_dimension_count (optional description of the data)
        spatial_calibrations (optional list of spatial calibration dicts, scale, offset, units)
        intensity_calibration (optional intensity calibration dict, scale, offset, units)
        metadata (optional)
        properties (get stored into metadata.hardware_source)
        one of either timestamp or datetime_modified
        if datetime_modified (dst, tz) it is converted and used as timestamp
            then timezone gets stored into metadata.description.timezone.
    """
    # data. takes ownership.
    data = data_element["data"]
    dimensional_shape = Image.dimensional_shape_from_data(data)
    is_sequence = data_element.get("is_sequence", False)
    # reuse the already-computed dimensional shape rather than calling
    # Image.dimensional_shape_from_data a second time
    dimension_count = len(dimensional_shape)
    adjusted_dimension_count = dimension_count - (1 if is_sequence else 0)
    # default: treat 3d/4d (sequence-adjusted) data as a 2d collection
    collection_dimension_count = data_element.get(
        "collection_dimension_count",
        2 if adjusted_dimension_count in (3, 4) else 0)
    datum_dimension_count = data_element.get(
        "datum_dimension_count",
        adjusted_dimension_count - collection_dimension_count)
    data_descriptor = DataAndMetadata.DataDescriptor(
        is_sequence, collection_dimension_count, datum_dimension_count)

    # dimensional calibrations; only applied when the list length matches the
    # data's dimensional shape, otherwise left as None
    dimensional_calibrations = None
    if "spatial_calibrations" in data_element:
        dimensional_calibrations_list = data_element.get(
            "spatial_calibrations")
        if len(dimensional_calibrations_list) == len(dimensional_shape):
            dimensional_calibrations = list()
            for dimension_calibration in dimensional_calibrations_list:
                offset = float(dimension_calibration.get("offset", 0.0))
                scale = float(dimension_calibration.get("scale", 1.0))
                units = dimension_calibration.get("units", "")
                units = str(units) if units is not None else str()
                if scale != 0.0:
                    dimensional_calibrations.append(
                        Calibration.Calibration(offset, scale, units))
                else:
                    # a zero scale is invalid; fall back to the identity calibration
                    dimensional_calibrations.append(Calibration.Calibration())

    # intensity calibration; a zero scale is invalid and leaves it unset
    intensity_calibration = None
    if "intensity_calibration" in data_element:
        intensity_calibration_dict = data_element.get("intensity_calibration")
        offset = float(intensity_calibration_dict.get("offset", 0.0))
        scale = float(intensity_calibration_dict.get("scale", 1.0))
        units = intensity_calibration_dict.get("units", "")
        units = str(units) if units is not None else str()
        if scale != 0.0:
            intensity_calibration = Calibration.Calibration(
                offset, scale, units)

    # properties (general tags); merged under metadata["hardware_source"]
    metadata = dict()
    if "metadata" in data_element:
        metadata.update(Utility.clean_dict(data_element.get("metadata")))
    if "properties" in data_element and data_element["properties"]:
        hardware_source_metadata = metadata.setdefault("hardware_source",
                                                       dict())
        hardware_source_metadata.update(
            Utility.clean_dict(data_element.get("properties")))

    # dates are _local_ time and must use this specific ISO 8601 format. 2013-11-17T08:43:21.389391
    # time zones are offsets (east of UTC) in the following format "+HHMM" or "-HHMM"
    # daylight savings times are time offset (east of UTC) in format "+MM" or "-MM"
    # timezone is for conversion and is the Olson timezone string.
    # datetime.datetime.strptime(datetime.datetime.isoformat(datetime.datetime.now()), "%Y-%m-%dT%H:%M:%S.%f" )
    # datetime_modified, datetime_modified_tz, datetime_modified_dst, datetime_modified_tzname is the time at which this image was modified.
    # datetime_original, datetime_original_tz, datetime_original_dst, datetime_original_tzname is the time at which this image was created.
    # NOTE(review): utcnow() produces a naive datetime; kept for compatibility
    # with the naive local_datetime arithmetic below.
    timestamp = data_element.get("timestamp", datetime.datetime.utcnow())
    datetime_item = data_element.get(
        "datetime_modified",
        Utility.get_datetime_item_from_utc_datetime(timestamp))

    local_datetime = Utility.get_datetime_from_datetime_item(datetime_item)
    tz_value = datetime_item.get("tz", "+0000")
    timezone = datetime_item.get("timezone")
    # note: a time_zone dict ("dst"/"tz"/"timezone") was previously built here
    # but never used; it has been removed along with the unused dst value.
    # dst is informational only; tz already includes dst.
    tz_adjust = (int(tz_value[1:3]) * 60 +
                 int(tz_value[3:5])) * (-1 if tz_value[0] == '-' else 1)
    utc_datetime = local_datetime - datetime.timedelta(
        minutes=tz_adjust)  # tz_adjust already contains dst_adjust
    timestamp = utc_datetime

    return DataAndMetadata.new_data_and_metadata(
        data,
        intensity_calibration=intensity_calibration,
        dimensional_calibrations=dimensional_calibrations,
        metadata=metadata,
        timestamp=timestamp,
        data_descriptor=data_descriptor,
        timezone=timezone,
        timezone_offset=tz_value)
Example #9
0
def function_rgba(
    red_data_and_metadata: DataAndMetadata.DataAndMetadata,
    green_data_and_metadata: DataAndMetadata.DataAndMetadata,
    blue_data_and_metadata: DataAndMetadata.DataAndMetadata,
    alpha_data_and_metadata: DataAndMetadata.DataAndMetadata
) -> typing.Optional[DataAndMetadata.DataAndMetadata]:
    """Combine four channel inputs into a single RGBA (uint8) image.

    Each input is promoted from a plain ndarray or constant to xdata sharing
    a common shape. Integer channel data is clamped to 0-255; float channel
    data is scaled by 255 and clamped. Channels are written in BGRA index
    order. Returns None when any channel has invalid data, a mismatched
    shape, or an unsupported dtype. The result carries the red channel's
    calibrations.
    """
    red_data_and_metadata = DataAndMetadata.promote_ndarray(
        red_data_and_metadata)
    green_data_and_metadata = DataAndMetadata.promote_ndarray(
        green_data_and_metadata)
    blue_data_and_metadata = DataAndMetadata.promote_ndarray(
        blue_data_and_metadata)
    alpha_data_and_metadata = DataAndMetadata.promote_ndarray(
        alpha_data_and_metadata)

    shape = tuple(
        DataAndMetadata.determine_shape(red_data_and_metadata,
                                        green_data_and_metadata,
                                        blue_data_and_metadata,
                                        alpha_data_and_metadata))

    red_data_and_metadata = DataAndMetadata.promote_constant(
        red_data_and_metadata, shape)
    green_data_and_metadata = DataAndMetadata.promote_constant(
        green_data_and_metadata, shape)
    blue_data_and_metadata = DataAndMetadata.promote_constant(
        blue_data_and_metadata, shape)
    alpha_data_and_metadata = DataAndMetadata.promote_constant(
        alpha_data_and_metadata, shape)

    # the red channel defines the reference shape for all channels
    shape = tuple(red_data_and_metadata.data_shape)

    def calculate_data():
        # build the uint8 RGBA image; returns None on any invalid channel
        rgba_image = numpy.empty(shape + (4, ), numpy.uint8)
        channels = (blue_data_and_metadata, green_data_and_metadata,
                    red_data_and_metadata, alpha_data_and_metadata)
        for channel_index, channel in enumerate(channels):
            data = channel.data

            if not Image.is_data_valid(data):
                return None

            if tuple(data.shape) != shape:
                return None

            if data.dtype.kind in 'iu':
                # integer data: clamp directly into 0-255
                rgba_image[..., channel_index] = numpy.clip(data, 0, 255)
            elif data.dtype.kind in 'f':
                # float data: scale to 0-255 range, then clamp
                rgba_image[..., channel_index] = numpy.clip(
                    numpy.multiply(data, 255), 0, 255)
            else:
                return None
        return rgba_image

    if tuple(green_data_and_metadata.data_shape) != shape or tuple(
            blue_data_and_metadata.data_shape) != shape or tuple(
                alpha_data_and_metadata.data_shape) != shape:
        return None

    # bug fix: calculate_data() may return None (invalid data or unsupported
    # dtype); previously None was passed straight into new_data_and_metadata.
    data = calculate_data()
    if data is None:
        return None

    return DataAndMetadata.new_data_and_metadata(
        data,
        intensity_calibration=red_data_and_metadata.intensity_calibration,
        dimensional_calibrations=red_data_and_metadata.dimensional_calibrations
    )
Example #10
0
    def continue_recording(self, current_time: float) -> None:
        """Advance an in-progress recording by one step, if one is due.

        Presumably called periodically with the current time (units match
        __recording_interval — TODO confirm against caller). When recording
        is active and the next interval boundary has passed, the latest
        complete frame is appended to the sequence held by the recording
        data item, which is created on first use. Recording stops on error,
        on a frame-shape mismatch, or once __recording_count frames exist.
        """
        if self.__recording_state == "recording":
            # floor division detects that we've crossed into the next interval
            # since the last appended frame (index counts frames so far)
            if (current_time - self.__recording_start
                ) // self.__recording_interval > self.__recording_index - 1:
                self.__recording_last = current_time
                # first create an empty data item to hold the recorded data if it doesn't already exist
                if not self.__recording_data_item:
                    data_item = DataItem.DataItem(large_format=True)
                    data_item.ensure_data_source()
                    data_item.title = _(
                        "Recording of ") + self.__data_item.title

                    # deferred insertion so it can be wrapped in an undo command
                    def process():
                        self.__document_model.append_data_item(data_item)
                        display_item = self.__document_controller.document_model.get_display_item_for_data_item(
                            data_item)
                        self.__document_controller.show_display_item(
                            display_item)
                        return data_item

                    command = Recorder.RecorderInsertDataItemCommandCommand(
                        self.__document_controller, self, process)
                    command.perform()
                    self.__document_controller.push_undo_command(command)

                    self.__recording_data_item = data_item
                # next grab the current data and stop if it is a sequence (can't record sequences)
                current_xdata = self.__last_complete_xdata
                if self.__recording_error:
                    self.__stop_recording()
                    return
                if not current_xdata:
                    # no first image yet
                    return
                # now record the new data. it may or may not be a new frame at this point.
                last_xdata = self.__recording_data_item.xdata
                self.__recording_index += 1
                # compare the new frame's shape against the per-frame shape of
                # the recorded sequence (data_shape[1:] drops the sequence axis)
                if current_xdata and last_xdata and current_xdata.data_shape == self.__recording_data_item.data_shape[
                        1:]:
                    # continue, append the new data to existing data and update existing data item
                    intensity_calibration = last_xdata.intensity_calibration
                    dimensional_calibrations = last_xdata.dimensional_calibrations
                    data_descriptor = last_xdata.data_descriptor
                    # vstack appends the new frame along the sequence axis
                    sequence_xdata = DataAndMetadata.new_data_and_metadata(
                        numpy.vstack([last_xdata.data, [current_xdata.data]]),
                        intensity_calibration=intensity_calibration,
                        dimensional_calibrations=dimensional_calibrations,
                        data_descriptor=data_descriptor)
                    self.__recording_data_item.set_xdata(sequence_xdata)
                elif current_xdata and not last_xdata:
                    # first acquisition, create the sequence
                    intensity_calibration = current_xdata.intensity_calibration
                    # prepend a time calibration (seconds) for the new sequence axis
                    dimensional_calibrations = [
                        Calibration.Calibration(
                            scale=self.__recording_interval, units="s")
                    ] + list(current_xdata.dimensional_calibrations)
                    # mark the data as a sequence, keeping the frame's descriptor
                    data_descriptor = DataAndMetadata.DataDescriptor(
                        True, current_xdata.data_descriptor.
                        collection_dimension_count,
                        current_xdata.data_descriptor.datum_dimension_count)
                    sequence_xdata = DataAndMetadata.new_data_and_metadata(
                        current_xdata.data[numpy.newaxis, ...],
                        intensity_calibration=intensity_calibration,
                        dimensional_calibrations=dimensional_calibrations,
                        data_descriptor=data_descriptor)
                    self.__recording_data_item.set_xdata(sequence_xdata)
                    self.__recording_transaction = self.__document_model.item_transaction(
                        self.__recording_data_item)
                else:
                    # something is amiss. stop.
                    self.__stop_recording()
                    return
            # finally -- check if we've reached the maximum count
            if self.__recording_index >= self.__recording_count:
                self.__stop_recording()