def __init__(self, base_title: str, data_group: typing.Optional[DataGroup.DataGroup], filter_id: typing.Optional[str], document_controller: DocumentController.DocumentController):
    """Track the display item count for one collection entry.

    The counted items come either from data_group or, when no data group is
    supplied, from the whole document model, filtered by the predicate
    associated with filter_id (or an accept-all filter when filter_id is None).
    """
    self.__base_title = base_title
    self.__data_group = data_group
    self.__filter_id = filter_id
    if self.__filter_id:
        self.__filter_predicate = document_controller.get_filter_predicate(filter_id)
    else:
        self.__filter_predicate = ListModel.Filter(True)
    # callback fired with the new title text whenever the count changes
    self.on_title_changed: typing.Optional[typing.Callable[[str], None]] = None
    self.__count = 0
    # useful for drag and drop
    self.document_controller = document_controller
    self.document_model = document_controller.document_model
    # observe the data group when present; otherwise the whole document model
    observed_container = self.__data_group or document_controller.document_model

    def handle_count(count: Observer.ItemValue) -> None:
        # cache the new count and notify any listener that the title changed
        self.__count = count
        if callable(self.on_title_changed):
            self.on_title_changed(self.title)

    # build an observer: length of the filtered "display_items" array,
    # delivered to handle_count on every change.
    observer_builder = Observer.ObserverBuilder()
    matches = self.__filter_predicate.matches
    observer_builder.source(observed_container).sequence_from_array("display_items", predicate=matches).len().action_fn(handle_count)
    self.__count_observer = typing.cast(Observer.AbstractItemSource, observer_builder.make_observable())
def add_line_profile(data_item: DataItem.DataItem, document_controller: DocumentController.DocumentController, display_panel_id: str, midpoint: float = 0.5, integration_width: float = 0.25) -> None:
    """Create and show a summed EELS profile through a crop of data_item.

    Adds a bounds-constrained rectangle graphic (centered vertically at
    midpoint, with height integration_width) to data_item's display item,
    creates a projection (sum) through that crop, titles it "EELS Summed",
    and shows the result in the display panel named by display_panel_id.

    midpoint / integration_width appear to be fractional (0..1) image
    coordinates, matching the defaults — TODO confirm against callers.
    """
    # fix: use lazy %-style logging args instead of eager str.format so the
    # values are only formatted when DEBUG logging is actually enabled
    logging.debug("midpoint: %.4f", midpoint)
    logging.debug("width: %.4f", integration_width)
    # next, line profile through center of crop
    # please don't copy this bad example code!
    crop_region = Graphics.RectangleGraphic()
    crop_region.center = Geometry.FloatPoint(midpoint, 0.5)
    crop_region.size = Geometry.FloatSize(integration_width, 1)
    crop_region.is_bounds_constrained = True
    display_item = document_controller.document_model.get_display_item_for_data_item(data_item)
    assert display_item
    display_item.add_graphic(crop_region)
    display_data_item = display_item.data_item
    assert display_data_item
    # sum (project) through the cropped region to produce the EELS spectrum
    eels_data_item = document_controller.document_model.get_projection_new(display_item, display_data_item, crop_region)
    if eels_data_item:
        eels_data_item.title = _("EELS Summed")
        eels_display_item = document_controller.document_model.get_display_item_for_data_item(eels_data_item)
        assert eels_display_item
        document_controller.show_display_item(eels_display_item)
    else:
        eels_display_item = None
    # place the result into the requested display panel, when a workspace exists
    workspace_controller = document_controller.workspace_controller
    if workspace_controller and eels_display_item:
        workspace_controller.display_display_item_in_display_panel(eels_display_item, display_panel_id)
def __init__( self, ui: UserInterface.UserInterface, document_controller: DocumentController.DocumentController ) -> None:
    """Build the collections panel: four fixed smart collections plus one
    entry per data group, kept in sync with the document controller's
    current filter/data-group selection."""
    content_widget = ui.create_column_widget()
    super().__init__(content_widget)
    document_model = document_controller.document_model
    # the four fixed "smart" collections; index order matters — it is
    # mirrored by filter_changed below (0=all, 1=persistent, 2=live, 3=latest)
    all_items_controller = CollectionDisplayItemCounter( _("All"), None, "all", document_controller)
    persistent_items_controller = CollectionDisplayItemCounter( _("Persistent"), None, "persistent", document_controller)
    live_items_controller = CollectionDisplayItemCounter( _("Live"), None, "temporary", document_controller)
    latest_items_controller = CollectionDisplayItemCounter( _("Latest Session"), None, "latest-session", document_controller)
    self.__item_controllers = [ all_items_controller, persistent_items_controller, live_items_controller, latest_items_controller ]
    # one controller per data group, maintained by the item inserted/removed
    # listeners below
    self.__data_group_controllers: typing.List[ CollectionDisplayItemCounter] = list()
    collection_selection = Selection.IndexedSelection( Selection.Style.single_or_none)
    collections_list_widget = Widgets.ListWidget( ui, CollectionListCanvasItemDelegate(collection_selection), selection=collection_selection, v_scroll_enabled=False, v_auto_resize=True)
    collections_list_widget.wants_drag_events = True

    def filter_changed(data_group: typing.Optional[DataGroup.DataGroup], filter_id: typing.Optional[str]) -> None:
        # reflect the document controller's filter/data-group state in the
        # list selection. a data group wins over a filter id.
        if data_group:
            for index, controller in enumerate( collections_list_widget.items):
                if data_group == controller.data_group:
                    collection_selection.set(index)
                    break
        else:
            # indexes match the fixed-controller order established above
            if filter_id == "latest-session":
                collection_selection.set(3)
            elif filter_id == "temporary":
                collection_selection.set(2)
            elif filter_id == "persistent":
                collection_selection.set(1)
            else:
                collection_selection.set(0)

    self.__filter_changed_event_listener = document_controller.filter_changed_event.listen( filter_changed)

    def collections_changed(t: str) -> None:
        # rebuild the widget's item list: fixed collections followed by the
        # data-group controllers. the title argument is unused.
        collections_list_widget.items = [ all_items_controller, persistent_items_controller, live_items_controller, latest_items_controller, ] + self.__data_group_controllers

    # any title (count) change refreshes the whole list
    all_items_controller.on_title_changed = collections_changed
    persistent_items_controller.on_title_changed = collections_changed
    live_items_controller.on_title_changed = collections_changed
    latest_items_controller.on_title_changed = collections_changed

    def document_model_item_inserted(key: str, value: typing.Any, before_index: int) -> None:
        # track data groups added to the document model
        if key == "data_groups":
            data_group = value
            controller = CollectionDisplayItemCounter( data_group.title, data_group, None, document_controller)
            self.__data_group_controllers.insert(before_index, controller)
            controller.on_title_changed = collections_changed
            collections_changed(str())

    def document_model_item_removed(key: str, value: typing.Any, index: int) -> None:
        # track data groups removed from the document model
        if key == "data_groups":
            controller = self.__data_group_controllers.pop(index)
            controller.close()
            collections_changed(str())

    self.__document_model_item_inserted_listener = document_model.item_inserted_event.listen( document_model_item_inserted)
    self.__document_model_item_removed_listener = document_model.item_removed_event.listen( document_model_item_removed)
    # initialize selection from the current filter state, then seed the
    # data-group controllers from the groups already in the model
    data_group, filter_id = document_controller.get_data_group_and_filter_id( )
    filter_changed(data_group, filter_id)
    for index, data_group in enumerate(document_model.data_groups):
        document_model_item_inserted("data_groups", data_group, index)
    collections_changed(str())

    def collections_selection_changed( indexes: typing.AbstractSet[int]) -> None:
        # push list selection back into the document controller: smart
        # collections set a filter; data groups set a data group.
        if len(indexes) == 0:
            controller = collections_list_widget.items[0]
            document_controller.set_filter(controller.filter_id)
        elif len(indexes) == 1:
            controller = collections_list_widget.items[list(indexes)[0]]
            if controller.is_smart_collection:
                document_controller.set_filter(controller.filter_id)
                document_controller.set_data_group(None)
            else:
                document_controller.set_filter(None)
                document_controller.set_data_group(controller.data_group)

    collections_list_widget.on_selection_changed = collections_selection_changed
    collections_column = ui.create_column_widget()
    collections_column.add(collections_list_widget)
    collections_section = Widgets.SectionWidget(ui, _("Collections"), collections_column)
    collections_section.expanded = True
    content_widget.add(collections_section)
    content_widget.add_stretch()
    # for testing
    self._collection_selection = collection_selection
def __init__( self, document_controller: DocumentController.DocumentController ) -> None:
    """Build the date-browser tree: display items are bucketed by the
    (year, month, day) of their local creation time and mirrored into an
    item model controller for the UI."""
    self.ui = document_controller.ui
    self.__periodic_listener = document_controller.add_periodic( 1.0, self.__periodic)
    self.item_model_controller = self.ui.create_item_model_controller()
    # weak ref avoids a reference cycle with the document controller
    self.__weak_document_controller = typing.cast( _DocumentControllerWeakRefType, weakref.ref(document_controller))
    self.__display_item_tree = TreeNode(reversed=True)
    self.__display_item_tree_mutex = threading.RLock()
    self.__display_item_tree.child_inserted = self.__insert_child
    self.__display_item_tree.child_removed = self.__remove_child
    self.__display_item_tree.tree_node_updated = self.__update_tree_node

    # thread safe.
    def display_item_inserted(key: str, display_item: _ValueType, before_index: int) -> None:
        """Handle insertion into the document controller's display item list.

        Breaks the item's local creation date into (year, month, day)
        indexes and inserts the item into the date tree at that position.
        """
        # NOTE(review): asserts main thread despite the "thread safe" label;
        # the mutex guards against readers on other threads.
        assert threading.current_thread() == threading.main_thread()
        with self.__display_item_tree_mutex:
            created_local = display_item.created_local
            indexes = created_local.year, created_local.month, created_local.day
            self.__display_item_tree.insert_value(indexes, display_item)

    # thread safe.
    def display_item_removed(key: str, display_item: _ValueType, index: int) -> None:
        """Handle removal from the document controller's display item list.

        Breaks the item's local creation date into (year, month, day)
        indexes and removes the item from the date tree at that position.
        """
        assert threading.current_thread() == threading.main_thread()
        with self.__display_item_tree_mutex:
            created = display_item.created_local
            indexes = created.year, created.month, created.day
            self.__display_item_tree.remove_value(indexes, display_item)

    # connect the display_items_model from the document controller to self.
    # when data items are inserted or removed from the document controller, the inserter and remover methods
    # will be called.
    self.__display_items_model = document_controller.display_items_model
    self.__display_item_inserted_listener = self.__display_items_model.item_inserted_event.listen( display_item_inserted)
    self.__display_item_removed_listener = self.__display_items_model.item_removed_event.listen( display_item_removed)
    # map tree-node id -> item model node, seeded with the root
    self.__mapping = dict()
    self.__mapping[id( self.__display_item_tree)] = self.item_model_controller.root
    self.__node_counts_dirty = False
    self.__date_filter: typing.Optional[ListModel.Filter] = None
    self.__text_filter: typing.Optional[ListModel.Filter] = None
    # seed the tree with the display items already present in the model
    for index, display_item in enumerate( self.__display_items_model.display_items):
        display_item_inserted("display_items", display_item, index)
def acquire_stack_and_sum( number_frames: int, energy_offset: float, dark_ref_enabled: bool, dark_ref_data: typing.Optional[_NDArray], cross_cor: bool, document_controller: DocumentController.DocumentController, final_layout_fn: typing.Callable[[], None]) -> None:
    """Acquire a multiple-shift EELS stack, align/sum it, and queue display.

    Acquires number_frames frames (optionally dark-referenced), sums the
    stack (with optional cross-correlation alignment), calibrates the energy
    axis so the ZLP sits at zero, and queues tasks on the main thread to show
    the stack, the summed spectrum, and a line profile through the ZLP.
    """
    # grab the document model and workspace for convenience
    with document_controller.create_task_context_manager( _("Multiple Shift EELS Acquire"), "table") as task:
        # acquire the stack. it will be added to the document by
        # queueing to the main thread at the end of this method.
        stack_data_item = acquire_series(number_frames, energy_offset, dark_ref_enabled, dark_ref_data, task)
        stack_data_item.title = _("Spectrum Stack")
        # align and sum the stack
        data_element: typing.Dict[str, typing.Any] = dict()
        stack_data = stack_data_item.data
        if stack_data is not None:
            if cross_cor:
                # Apply cross-correlation between subsequent acquired
                # images and align the image stack
                summed_image, _1 = align_stack(stack_data, task)
            else:
                # If user does not desire the cross-correlation to happen
                # then simply sum the stack (eg, when acquiring dark data)
                summed_image = numpy.sum(stack_data, axis=0)
            # add the summed image to Swift
            data_element["data"] = summed_image
            data_element["title"] = "Aligned and summed spectra"
            # strip off the first dimension that we sum over
            for dimensional_calibration in ( stack_data_item.dimensional_calibrations[1:]):
                data_element.setdefault( "spatial_calibrations", list()).append({ "origin": dimensional_calibration.offset,  # TODO: fix me
                    "scale": dimensional_calibration.scale, "units": dimensional_calibration.units })
            # set the energy dispersive calibration so that the ZLP is at
            # zero eV
            zlp_position_pixels = numpy.sum(summed_image, axis=0).argmax()
            zlp_position_calibrated_units = ( -zlp_position_pixels * data_element["spatial_calibrations"][1]["scale"])
            data_element["spatial_calibrations"][1]["offset"] = ( zlp_position_calibrated_units)
            sum_data_item = ( ImportExportManager.create_data_item_from_data_element( data_element))
            # estimate the ZLP midpoint and width from the steepest rise and
            # fall of the dispersive sum, as fractions of the profile length
            dispersive_sum = numpy.sum(summed_image, axis=1)
            differential = numpy.diff(dispersive_sum)  # type: ignore
            top = numpy.argmax(differential)
            bottom = numpy.argmin(differential)
            _midpoint = numpy.mean([bottom, top]) / dispersive_sum.shape[0]
            _integration_width = (float(numpy.abs(bottom - top)) / dispersive_sum.shape[0])
            # queue UI work back onto the main thread
            document_controller.queue_task(final_layout_fn)
            document_controller.queue_task( functools.partial(show_in_panel, stack_data_item, document_controller, "multiple_shift_eels_stack"))
            document_controller.queue_task( functools.partial( show_in_panel, sum_data_item, document_controller, "multiple_shift_eels_aligned_summed_stack"))
            document_controller.queue_task( functools.partial(add_line_profile, sum_data_item, document_controller, "multiple_shift_eels_spectrum", _midpoint, _integration_width))
async def grab(self, document_controller: DocumentController.DocumentController, hardware_source: scan_base.ScanHardwareSource, do_acquire: bool) -> None:
    """Record frame_count frames from the scan hardware source's buffer,
    stack them into new data items, and show them.

    When do_acquire is true, first starts playback and waits (in a thread)
    for the frames to be acquired; then stops playback and grabs the buffered
    data (also in a thread). Progress and state are published through
    self.progress_model / self.state; self.cancel_event aborts either phase.
    """
    # this is an async method meaning that it will execute until it calls await, at which time
    # it will let other parts of the software run until the awaited function finishes. in this
    # case, waiting for acquired data and grabbing the last frames are run in a thread.
    assert document_controller
    assert hardware_source
    event_loop = document_controller.event_loop
    self.cancel_event.clear()
    self.state.value = "running"
    self.progress_model.value = 0
    frame_count = self.frame_count_model.value or 0
    was_playing = hardware_source.is_playing
    # single-element list so the worker closures can signal failure
    success_ref = [True]
    xdata_group_list: typing.List[typing.Sequence[typing.Optional[ DataAndMetadata.DataAndMetadata]]] = list()

    def exec_acquire() -> None:
        # this will execute in a thread; the enclosing async routine will continue when it finishes
        try:
            start_time = time.time()
            # allow 1.5 frame times, but at least 3 seconds, before giving up
            max_wait_time = max( hardware_source.get_current_frame_time() * 1.5, 3)
            while not hardware_source.is_playing:
                if time.time() - start_time > max_wait_time:
                    success_ref[0] = False
                    return
                time.sleep(0.01)
            hardware_source.get_next_xdatas_to_start( max_wait_time * 2)  # wait for frame + next frame
            # let frame_count frames complete, updating progress as we go
            for i in range(frame_count):
                self.progress_model.value = int(100 * i / frame_count)
                if self.cancel_event.is_set():
                    success_ref[0] = False
                    break
                hardware_source.get_next_xdatas_to_finish(max_wait_time * 2)
        except Exception as e:
            import traceback
            traceback.print_exc()
            success_ref[0] = False

    if do_acquire:
        print("AR: start playing")
        hardware_source.start_playing()
        print("AR: wait for acquire")
        await event_loop.run_in_executor(None, exec_acquire)
        print("AR: acquire finished")

    def exec_grab() -> None:
        # this will execute in a thread; the enclosing async routine will continue when it finishes
        try:
            start_time = time.time()
            max_wait_time = max( hardware_source.get_current_frame_time() * 1.5, 3)
            # wait for playback to stop before reading the buffer
            while hardware_source.is_playing:
                if time.time() - start_time > max_wait_time:
                    success_ref[0] = False
                    return
                time.sleep(0.01)
            # read the last frame_count frame groups from the hardware buffer
            data_element_groups = hardware_source.get_buffer_data( -frame_count, frame_count)
            for data_element_group in data_element_groups:
                if self.cancel_event.is_set():
                    success_ref[0] = False
                    break
                xdata_group = list()
                for data_element in data_element_group:
                    xdata = ImportExportManager.convert_data_element_to_data_and_metadata( data_element)
                    xdata_group.append(xdata)
                xdata_group_list.append(xdata_group)
            self.progress_model.value = 100
        except Exception as e:
            import traceback
            traceback.print_exc()
            success_ref[0] = False

    if success_ref[0]:
        print("AR: stop playing")
        hardware_source.stop_playing()
        print("AR: grabbing data")
        await event_loop.run_in_executor(None, exec_grab)
        print("AR: grab finished")
    xdata_group = None
    if success_ref[0]:
        if len(xdata_group_list) > 1:
            print("AR: making xdata")
            # count trailing groups whose shapes/dtypes match the last group;
            # only those consistent groups are stacked together below
            valid_count = 0
            examplar_xdata_group = xdata_group_list[-1]
            shapes = [ xdata._data_ex.shape for xdata in examplar_xdata_group if xdata ]
            dtypes = [ xdata._data_ex.dtype for xdata in examplar_xdata_group if xdata ]
            for xdata_group in reversed(xdata_group_list):
                shapes_i = [ xdata._data_ex.shape for xdata in xdata_group if xdata ]
                dtypes_i = [ xdata._data_ex.dtype for xdata in xdata_group if xdata ]
                if shapes_i == shapes and dtypes_i == dtypes:
                    valid_count += 1
            # stack each channel across the valid groups into a sequence,
            # prepending a sequence dimension/calibration
            xdata_group = list()
            for i, xdata in enumerate(examplar_xdata_group):
                if xdata:
                    intensity_calibration = xdata.intensity_calibration
                    dimensional_calibrations = [ Calibration.Calibration() ] + list(xdata.dimensional_calibrations)
                    data_descriptor = DataAndMetadata.DataDescriptor( True, xdata.data_descriptor.collection_dimension_count, xdata.data_descriptor.datum_dimension_count)
                    # TODO: ugly typing.
                    data: numpy.typing.NDArray[typing.Any] = numpy.vstack( list( typing.cast(DataAndMetadata.DataAndMetadata, xdata_group[i])._data_ex for xdata_group in xdata_group_list[-valid_count:])).reshape( valid_count, *shapes[i])
                    xdata = DataAndMetadata.new_data_and_metadata( data, intensity_calibration=intensity_calibration, dimensional_calibrations=dimensional_calibrations, data_descriptor=data_descriptor, metadata=xdata.metadata)
                    xdata_group.append(xdata)
        elif len(xdata_group_list) == 1:
            xdata_group = xdata_group_list[0]
        if xdata_group:
            print("AR: making data item")
            # one data item per channel, titled with the channel name
            for xdata in xdata_group:
                if xdata:
                    data_item = DataItem.DataItem(large_format=True)
                    data_item.ensure_data_source()
                    data_item.set_xdata(xdata)
                    channel_name = xdata.metadata.get( "hardware_source", dict()).get("channel_name")
                    channel_ext = (" (" + channel_name + ")") if channel_name else ""
                    data_item.title = _( "Recording of " ) + hardware_source.display_name + channel_ext
                    document_controller.document_model.append_data_item( data_item)
                    display_item = document_controller.document_model.get_display_item_for_data_item( data_item)
                    if display_item:
                        document_controller.show_display_item(display_item)
    # restore playback if the source was playing when we started
    if was_playing:
        print("AR: restarting")
        hardware_source.start_playing()
    self.state.value = "idle"
    self.progress_model.value = 0
    print("AR: done")