Example #1
 def exec_grab():
     # this will execute in a thread; the enclosing async routine will continue when it finishes
     try:
         start_time = time.time()
         max_wait_time = max(
             hardware_source.get_current_frame_time() * 1.5, 3)
         while hardware_source.is_playing:
             if time.time() - start_time > max_wait_time:
                 success_ref[0] = False
                 return
             time.sleep(0.01)
         data_element_groups = hardware_source.get_buffer_data(
             -frame_count, frame_count)
         for data_element_group in data_element_groups:
             if self.cancel_event.is_set():
                 success_ref[0] = False
                 break
             xdata_group = list()
             for data_element in data_element_group:
                 xdata = ImportExportManager.convert_data_element_to_data_and_metadata(
                     data_element)
                 xdata_group.append(xdata)
             xdata_group_list.append(xdata_group)
         self.progress_model.value = 100
     except Exception as e:
         import traceback
         traceback.print_exc()
         success_ref[0] = False
 def create_result_data_item(
         self, data_dict: typing.Mapping[str, typing.Any]) -> None:
     reset_color_cycle()
     display_item = None
     sorted_indices = numpy.argsort(
         [parms['start_ev'] for parms in data_dict['parameter_list']])
     display_layer_index = 0
     for i in sorted_indices:
         index = data_dict['parameter_list'][i]['index']
         xdata = ImportExportManager.convert_data_element_to_data_and_metadata(
             data_dict['data_element_list'][i])
         start_ev = data_dict['parameter_list'][i]['start_ev']
         end_ev = data_dict['parameter_list'][i]['end_ev']
         number_frames = data_dict['parameter_list'][i]['frames']
         exposure_ms = data_dict['parameter_list'][i]['exposure_ms']
         summed = ' (summed)' if not data_dict['data_element_list'][i].get(
             'is_sequence', False) and number_frames > 1 else ''
         data_item = None
         if (i == sorted_indices[0] and xdata.datum_dimension_count == 1
                 and data_dict['settings_list'][i]['use_multi_eels_calibration']):
             data_item = self.document_controller.library.create_data_item_from_data_and_metadata(
                 xdata, title='MultiAcquire (stacked)')
             display_item = self.__api.library._document_model.get_display_item_for_data_item(
                 data_item._data_item)
             display_layer_index += 1  # display item has one display layer already
         units_str = ' eV' if data_dict['settings_list'][i][
             'use_multi_eels_calibration'] else ''
         new_data_item = self.document_controller.library.create_data_item_from_data_and_metadata(
             xdata,
             title='MultiAcquire #{:d}, {:g}-{:g}{}, {:g}x{:g} ms{}'.format(
                 index + 1, start_ev, end_ev, units_str, number_frames,
                 exposure_ms, summed))
         data_item_metadata = new_data_item.metadata
         metadata = dict(data_item_metadata) if data_item_metadata else dict()
         metadata['MultiAcquire.parameters'] = data_dict['parameter_list'][i]
         metadata['MultiAcquire.settings'] = data_dict['settings_list'][i]
         new_data_item.set_metadata(metadata)
         if display_item:
             display_item.append_display_data_channel_for_data_item(
                 data_item._data_item if data_item else new_data_item._data_item)
             start_ev = data_dict['parameter_list'][i]['start_ev']
             end_ev = data_dict['parameter_list'][i]['end_ev']
             display_layer_label = '#{:d}: {:g}-{:g}{}, {:g}x{:g} ms{}'.format(
                 index + 1, start_ev, end_ev, units_str, number_frames,
                 exposure_ms, summed)
             display_item._set_display_layer_properties(
                 display_layer_index,
                 label=display_layer_label,
                 stroke_color=get_next_color(),
                 fill_color=None)
             display_layer_index += 1
     if display_item:
         display_item.set_display_property('legend_position', 'top-right')
         display_item.title = 'MultiAcquire (stacked)'
         show_display_item(self.document_controller, display_item)
 def test_data_element_to_extended_data_includes_time_zone(self):
     data_element = dict()
     data_element["version"] = 1
     data_element["data"] = numpy.zeros((16, 16), dtype=numpy.double)
     data_element["datetime_modified"] = {'tz': '+0300', 'dst': '+60', 'local_datetime': '2015-06-10T19:31:52.780511'}
     xdata = ImportExportManager.convert_data_element_to_data_and_metadata(data_element)
     self.assertEqual(xdata.timezone_offset, "+0300")
     self.assertEqual(str(xdata.timestamp), "2015-06-10 16:31:52.780511")
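
Both snippets in this example feed plain data-element dicts into ImportExportManager.convert_data_element_to_data_and_metadata. A minimal, self-contained sketch of that conversion (values taken from the test above; the standard nion.swift.model and numpy import paths are assumed): the local datetime plus timezone offset under "datetime_modified" becomes a UTC timestamp on the returned DataAndMetadata, with the offset kept separately.

    import numpy
    from nion.swift.model import ImportExportManager

    # a minimal data element: just an array plus the modification time in local time
    data_element = {
        "version": 1,
        "data": numpy.zeros((16, 16), dtype=numpy.double),
        "datetime_modified": {"tz": "+0300", "dst": "+60",
                              "local_datetime": "2015-06-10T19:31:52.780511"},
    }
    xdata = ImportExportManager.convert_data_element_to_data_and_metadata(data_element)
    print(xdata.timezone_offset)  # "+0300"
    print(xdata.timestamp)        # 2015-06-10 16:31:52.780511 (stored as UTC)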
Example #4
 def create_result_data_item(self, data_dict):
     display_layers = []
     reset_color_cycle()
     display_item = None
     sorted_indices = numpy.argsort(
         [parms['start_ev'] for parms in data_dict['parameter_list']])
     for i in sorted_indices:
         index = data_dict['parameter_list'][i]['index']
         xdata = ImportExportManager.convert_data_element_to_data_and_metadata(
             data_dict['data_element_list'][i])
         start_ev = data_dict['parameter_list'][i]['start_ev']
         end_ev = data_dict['parameter_list'][i]['end_ev']
         number_frames = data_dict['parameter_list'][i]['frames']
         exposure_ms = data_dict['parameter_list'][i]['exposure_ms']
         summed = ' (summed)' if not data_dict['data_element_list'][i].get(
             'is_sequence', False) and number_frames > 1 else ''
         data_item = None
         if i == sorted_indices[0] and xdata.datum_dimension_count == 1:
             data_item = self.document_controller.library.create_data_item_from_data_and_metadata(
                 xdata, title='MultiAcquire (stacked)')
             display_item = self.__api.library._document_model.get_display_item_for_data_item(
                 data_item._data_item)
         new_data_item = self.document_controller.library.create_data_item_from_data_and_metadata(
             xdata,
             title='MultiAcquire #{:d}, {:g}-{:g} eV, {:g}x{:g} ms{}'.format(
                 index + 1, start_ev, end_ev, number_frames, exposure_ms, summed))
         metadata = new_data_item.metadata
         metadata['MultiAcquire.parameters'] = data_dict['parameter_list'][i]
         metadata['MultiAcquire.settings'] = data_dict['settings_list'][i]
         new_data_item.set_metadata(metadata)
         if display_item:
             display_item.append_display_data_channel_for_data_item(
                 data_item._data_item if data_item else new_data_item._data_item)
             start_ev = data_dict['parameter_list'][i]['start_ev']
             end_ev = data_dict['parameter_list'][i]['end_ev']
             display_layers.append({
                 'label': '#{:d}: {:g}-{:g} eV, {:g}x{:g} ms{}'.format(
                     index + 1, start_ev, end_ev, number_frames, exposure_ms, summed),
                 'data_index': len(display_layers),
                 'stroke_color': get_next_color(),
                 'fill_color': None
             })
     if display_item:
         display_item.display_layers = display_layers
         display_item.set_display_property('legend_position', 'top-right')
         display_item.title = 'MultiAcquire (stacked)'
         show_display_item(self.document_controller, display_item)
 def grab_sequence(
     self, count: int, **kwargs
 ) -> typing.Optional[typing.List[DataAndMetadata.DataAndMetadata]]:
     self.start_playing()
     frames = self.acquire_sequence(count)
     if frames is not None:
         xdatas = list()
         for data_element in frames:
             data_element["is_sequence"] = True
             data_element["collection_dimension_count"] = 0
             data_element["datum_dimension_count"] = len(
                 data_element["data"].shape) - 1
             xdata = ImportExportManager.convert_data_element_to_data_and_metadata(
                 data_element)
             xdatas.append(xdata)
         return xdatas
     return None
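
grab_sequence above tags each data element with descriptor keys before converting it. A short sketch of how those keys map onto the resulting data descriptor (hypothetical array shape; standard nion.swift.model and numpy imports assumed):

    import numpy
    from nion.swift.model import ImportExportManager

    data = numpy.zeros((10, 512))  # hypothetical: 10 frames of a 512-channel spectrum
    data_element = {
        "version": 1,
        "data": data,
        "is_sequence": True,
        "collection_dimension_count": 0,
        "datum_dimension_count": len(data.shape) - 1,
    }
    xdata = ImportExportManager.convert_data_element_to_data_and_metadata(data_element)
    assert xdata.is_sequence
    assert xdata.datum_dimension_count == 1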
Example #6
 def grab_partial(
         self,
         *,
         update_period: float = 1.0) -> typing.Tuple[bool, bool, int]:
     # updates the full readout data, returns a tuple of is complete, is canceled, and
     # the number of valid rows.
     start = self.__start
     frame_parameters = self.__camera_frame_parameters
     exposure = frame_parameters.exposure_ms / 1000.0
     n = min(max(int(update_period / exposure), 1), self.__count - start)
     is_complete = start + n == self.__count
     # print(f"{start=} {n=} {self.__count=} {is_complete=}")
     data_element = self.__camera_device._acquire_sequence(n)
     if data_element and not self.__aborted:
         xdata = ImportExportManager.convert_data_element_to_data_and_metadata(
             data_element)
         dimensional_calibrations = tuple(
             Calibration.Calibration()
             for _ in range(len(self.__collection_shape))) + tuple(
                 xdata.dimensional_calibrations[1:])
         assert self.__xdata
         self.__xdata._set_intensity_calibration(
             xdata.intensity_calibration)
         self.__xdata._set_dimensional_calibrations(
             dimensional_calibrations)
         if len(self.__collection_shape) > 1:
             row_size = self.__collection_shape[-1]
             start_row = start // row_size
             rows = n // row_size
             metadata = dict(xdata.metadata)
             metadata.setdefault("hardware_source",
                                 dict())["valid_rows"] = start_row + rows
             self.__xdata._set_metadata(metadata)
         else:
             row_size = 1
         # convert from a sequence to a collection.
         assert self.__data is not None
         self.__data.reshape(
             (self.__data.shape[0] * row_size, ) +
             self.__data.shape[len(self.__collection_shape):])[
                 start:start + n, ...] = xdata._data_ex
         self.__start = start + n
         return is_complete, False, start + n
     self.__start = 0
     return True, True, 0
 def test_data_element_to_extended_data_conversion(self):
     data = numpy.ones((8, 6), int)
     intensity_calibration = Calibration.Calibration(offset=1, scale=1.1, units="one")
     dimensional_calibrations = [Calibration.Calibration(offset=2, scale=2.1, units="two"), Calibration.Calibration(offset=3, scale=2.2, units="two")]
     metadata = {"hardware_source": {"one": 1, "two": "b"}}
     timestamp = datetime.datetime.now()
     data_descriptor = DataAndMetadata.DataDescriptor(is_sequence=False, collection_dimension_count=1, datum_dimension_count=1)
     xdata = DataAndMetadata.new_data_and_metadata(data, intensity_calibration=intensity_calibration, dimensional_calibrations=dimensional_calibrations, metadata=metadata, timestamp=timestamp, data_descriptor=data_descriptor)
     data_element = ImportExportManager.create_data_element_from_extended_data(xdata)
     new_xdata = ImportExportManager.convert_data_element_to_data_and_metadata(data_element)
     self.assertTrue(numpy.array_equal(data, new_xdata.data))
     self.assertNotEqual(id(new_xdata.intensity_calibration), id(intensity_calibration))
     self.assertEqual(new_xdata.intensity_calibration, intensity_calibration)
     self.assertNotEqual(id(new_xdata.dimensional_calibrations[0]), id(dimensional_calibrations[0]))
     self.assertEqual(new_xdata.dimensional_calibrations, dimensional_calibrations)
     self.assertNotEqual(id(new_xdata.metadata), id(metadata))
     self.assertEqual(new_xdata.metadata, metadata)
     self.assertNotEqual(id(new_xdata.data_descriptor), id(data_descriptor))
     self.assertEqual(new_xdata.data_descriptor, data_descriptor)
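
grab_partial above rebuilds the dimensional calibrations by pairing blank calibrations for the collection axes with the camera's own datum calibrations. A minimal, hypothetical sketch of that splice outside the class (standard nion.data imports assumed; the shapes and calibration values are made up):

    import numpy
    from nion.data import Calibration, DataAndMetadata

    # hypothetical camera result: a sequence of 16 spectra with an energy calibration
    camera_xdata = DataAndMetadata.new_data_and_metadata(
        numpy.zeros((16, 512)),
        dimensional_calibrations=[Calibration.Calibration(),
                                  Calibration.Calibration(scale=0.5, units="eV")])
    collection_shape = (4, 4)  # hypothetical scan grid
    # blank calibrations for the collection axes, camera calibrations for the datum axes
    dimensional_calibrations = tuple(
        Calibration.Calibration() for _ in range(len(collection_shape))
    ) + tuple(camera_xdata.dimensional_calibrations[1:])
    assert len(dimensional_calibrations) == 3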
Example #8
        def acquire_sequence(api: API.API, document_window: API.DocumentWindow,
                             scan_hardware_source,
                             camera_hardware_source) -> None:
            try:
                logging.debug("start")
                self.acquisition_state_changed_event.fire({"message": "start"})
                try:
                    camera_hardware_source_id = camera_hardware_source._hardware_source.hardware_source_id
                    camera_frame_parameters = camera_hardware_source.get_frame_parameters_for_profile_by_index(
                        0)
                    if sum_frames:
                        camera_frame_parameters["processing"] = "sum_project"

                    scan_frame_parameters = scan_hardware_source.get_frame_parameters_for_profile_by_index(
                        2)
                    scan_max_area = 2048 * 2048
                    scan_param_height = int(scan_frame_parameters["size"][0])
                    scan_param_width = int(scan_frame_parameters["size"][1])
                    if scan_param_height * scan_param_width > scan_max_area:
                        scan_param_height = scan_max_area // scan_param_width
                    scan_frame_parameters["size"] = scan_param_height, scan_param_width
                    scan_frame_parameters["pixel_time_us"] = int(
                        1000 * camera_frame_parameters["exposure_ms"] * 0.75)
                    # long timeout is needed until memory allocation is outside of the acquire_sequence call.
                    scan_frame_parameters["external_clock_wait_time_ms"] = 20000  # int(camera_frame_parameters["exposure_ms"] * 1.5)
                    scan_frame_parameters["external_clock_mode"] = 1
                    scan_frame_parameters["ac_line_sync"] = False
                    scan_frame_parameters["ac_frame_sync"] = False
                    flyback_pixels = scan_hardware_source._hardware_source.flyback_pixels  # using internal API
                    scan_height = scan_param_height
                    scan_width = scan_param_width + flyback_pixels

                    library = document_window.library

                    camera_hardware_source._hardware_source.set_current_frame_parameters(
                        camera_hardware_source._hardware_source.get_frame_parameters_from_dict(
                            camera_frame_parameters))
                    camera_hardware_source._hardware_source.acquire_sequence_prepare(
                        scan_width * scan_height)

                    with contextlib.closing(
                            scan_hardware_source.create_record_task(
                                scan_frame_parameters)) as scan_task:
                        data_elements = camera_hardware_source._hardware_source.acquire_sequence(
                            scan_width * scan_height)
                        data_element = data_elements[0]
                        # the data_element['data'] ndarray may point to low level memory; we need to get it to disk
                        # quickly. see note below.
                        scan_data_list = scan_task.grab()
                        data_shape = data_element["data"].shape
                        if flyback_pixels > 0:
                            # reshape to (scan_height, scan_width, ...) and strip the leading flyback pixels
                            data_element["data"] = data_element["data"].reshape(
                                scan_height, scan_width,
                                *data_shape[1:])[:, flyback_pixels:scan_width, :]
                        else:
                            data_element["data"] = data_element["data"].reshape(
                                scan_height, scan_width, *data_shape[1:])
                        if len(scan_data_list) > 0:
                            collection_calibrations = [
                                calibration.write_dict() for calibration in
                                scan_data_list[0].dimensional_calibrations
                            ]
                            scan_properties = scan_data_list[0].metadata
                        else:
                            collection_calibrations = [{}, {}]
                            scan_properties = {}
                        if "spatial_calibrations" in data_element:
                            datum_calibrations = [
                                copy.deepcopy(spatial_calibration)
                                for spatial_calibration in
                                data_element["spatial_calibrations"][1:]
                            ]
                        else:
                            datum_calibrations = [{} for i in range(
                                len(data_element["data"].shape) - 2)]
                        # combine the dimensional calibrations from the scan data with the datum dimensions calibration from the sequence
                        data_element["collection_dimension_count"] = 2
                        data_element["spatial_calibrations"] = (
                            collection_calibrations + datum_calibrations)
                        data_element.setdefault("metadata", dict())["scan_detector"] = (
                            scan_properties.get("hardware_source", dict()))
                        data_and_metadata = ImportExportManager.convert_data_element_to_data_and_metadata(
                            data_element)

                        def create_and_display_data_item():
                            data_item = library.get_data_item_for_hardware_source(
                                scan_hardware_source,
                                channel_id=camera_hardware_source_id,
                                processor_id="summed",
                                create_if_needed=True,
                                large_format=True)
                            data_item.title = _("Spectrum Image {}".format(
                                " x ".join([
                                    str(d) for d in
                                    data_and_metadata.dimensional_shape
                                ])))
                            # the data item should not have any other 'clients' at this point; so setting the
                            # data and metadata will immediately unload the data (and write to disk). this is important,
                            # because the data (up to this point) can be shared data from the DLL.
                            data_item.set_data_and_metadata(data_and_metadata)
                            # assert not data_item._data_item.is_data_loaded
                            # now to display it will reload the data (presumably from an HDF5 or similar on-demand format).
                            document_window.display_data_item(data_item)
                            for scan_data_and_metadata in scan_data_list:
                                scan_channel_id = scan_data_and_metadata.metadata[
                                    "hardware_source"]["channel_id"]
                                scan_channel_name = scan_data_and_metadata.metadata[
                                    "hardware_source"]["channel_name"]
                                channel_id = camera_hardware_source_id + "_" + scan_channel_id
                                data_item = library.get_data_item_for_hardware_source(
                                    scan_hardware_source,
                                    channel_id=channel_id,
                                    create_if_needed=True)
                                data_item.title = "{} ({})".format(
                                    _("Spectrum Image"), scan_channel_name)
                                data_item.set_data_and_metadata(
                                    scan_data_and_metadata)
                                document_window.display_data_item(data_item)

                        # must occur on UI thread
                        document_window.queue_task(create_and_display_data_item)
                finally:
                    self.acquisition_state_changed_event.fire(
                        {"message": "end"})
                    logging.debug("end")
            except Exception as e:
                import traceback
                traceback.print_exc()
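
The sequence acquisition above assembles the final spectrum image by hand: the scan calibrations become the first two entries of "spatial_calibrations", the camera calibrations fill the rest, and "collection_dimension_count" is set to 2 so the converter builds the matching data descriptor. A compact, hypothetical sketch of the same assembly with synthetic data (standard imports assumed; the calibration values are made up):

    import numpy
    from nion.swift.model import ImportExportManager

    scan_height, scan_width, spectrum_length = 4, 5, 512  # hypothetical sizes
    data_element = {
        "version": 1,
        "data": numpy.zeros((scan_height, scan_width, spectrum_length)),
        "collection_dimension_count": 2,
        # two scan calibrations followed by the camera's energy calibration
        "spatial_calibrations": [{"offset": 0.0, "scale": 1.0, "units": "nm"},
                                 {"offset": 0.0, "scale": 1.0, "units": "nm"},
                                 {"offset": 0.0, "scale": 0.5, "units": "eV"}],
    }
    xdata = ImportExportManager.convert_data_element_to_data_and_metadata(data_element)
    assert xdata.collection_dimension_count == 2
    assert xdata.dimensional_calibrations[-1].units == "eV"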
Example #9
    def process_display_queue(self):
        while True:
            try:
                data_dict = self.__display_queue.get(timeout=1)
            except queue.Empty:
                if self.__data_processed_event.is_set() and not self.__acquisition_running:
                    self.__close_data_item_refs()
                    break
            else:
                index = data_dict['parameters']['index']
                line_number = data_dict['parameters']['line_number']
                data_element = data_dict['data_element']
                line_data = data_element['data']
                start_ev = data_dict['parameters']['start_ev']
                end_ev = data_dict['parameters']['end_ev']
                number_frames = data_dict['parameters']['frames']
                exposure_ms = data_dict['parameters']['exposure_ms']
                print('got data from display queue')
                if not self.result_data_items.get(index):
                    print('creating new data item')
                    spatial_calibrations = data_element['spatial_calibrations']
                    data_element['data'] = line_data[numpy.newaxis, ...]
                    data_element['collection_dimension_count'] = 2
                    data_element['spatial_calibrations'] = (
                        self.multi_acquire_controller.scan_calibrations[0:1] +
                        spatial_calibrations)
                    metadata = data_element.get('metadata', {})
                    metadata['MultiAcquire.parameters'] = data_dict['parameters']
                    metadata['MultiAcquire.settings'] = data_dict['settings']
                    # attach the combined metadata dict (not just the parameters) to the data element
                    data_element['metadata'] = metadata
                    new_xdata = ImportExportManager.convert_data_element_to_data_and_metadata(
                        data_element)
                    title = ('MultiAcquire (stitched)'
                             if data_dict.get('stitched_data') else
                             'MultiAcquire #{:d}, {:g}-{:g} eV, {:g}x{:g} ms'.format(
                                 index + 1, start_ev, end_ev, number_frames, exposure_ms))
                    data_item_ready_event = threading.Event()
                    new_data_item = None

                    def create_data_item():
                        nonlocal new_data_item
                        # we have to create the initial data item with some data that has more than 2 dimensions
                        # otherwise Swift does not use HDF5 and we will have a problem if it grows too big later
                        new_data_item = self.__api.library.create_data_item_from_data_and_metadata(
                            new_xdata, title=title)
                        data_item_ready_event.set()

                    self.__api.queue_task(create_data_item)
                    data_item_ready_event.wait()
                    number_lines = data_dict['parameters'].get(
                        'number_lines',
                        self.multi_acquire_controller.number_lines)
                    max_shape = (number_lines, ) + line_data.shape
                    new_appendable_data_item = AppendableDataItemFixedSize(
                        new_data_item, max_shape, self.__api)
                    new_appendable_data_item.enter_write_suspend_state()
                    self.result_data_items[index] = new_appendable_data_item
                    # add the line we have already in our data item to the appendable data item to have them in the
                    # same state
                    self.result_data_items[index].add_data((line_number, ...),
                                                           line_data)
                    self.__api.queue_task(
                        lambda: self.result_data_items[index].get_partial_data_item(
                            (slice(0, line_number + 1), ...)))
                    del new_xdata
                    del new_appendable_data_item
                else:
                    self.result_data_items[index].add_data((line_number, ...),
                                                           line_data)
                    self.__api.queue_task(
                        lambda: self.result_data_items[index].get_partial_data_item(
                            (slice(0, line_number + 1), ...)))
                del line_data
                del data_element
                del data_dict
                self.__display_queue.task_done()
                print('displayed line {:.0f}'.format(line_number))
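
As the queue handler above shows, whatever dict sits under the data element's "metadata" key is what ends up in xdata.metadata after conversion, which is why the MultiAcquire keys are merged into that dict before the element is converted. A minimal, hypothetical sketch (standard imports assumed; the parameter values are made up):

    import numpy
    from nion.swift.model import ImportExportManager

    data_element = {
        "version": 1,
        "data": numpy.zeros((4, 512)),
        "metadata": {"MultiAcquire.parameters": {"index": 0, "start_ev": 0.0, "end_ev": 10.0},
                     "MultiAcquire.settings": {}},
    }
    xdata = ImportExportManager.convert_data_element_to_data_and_metadata(data_element)
    assert xdata.metadata["MultiAcquire.parameters"]["index"] == 0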