def menu_item_execute(self, window: API.DocumentWindow) -> None:
    """Add two transform vectors to the target data item and wire up an affine-transform computation."""
    source_item = window.target_data_item
    if not source_item:
        return

    def _add_vector(label, end_y, end_x):
        # Arrowed line region starting at the image center.
        region = source_item.add_line_region(0.5, 0.5, end_y, end_x)
        region.label = label
        region._graphic.end_arrow_enabled = True
        return region

    vec_a = _add_vector("Vector a", 0.5, 0.75)
    vec_b = _add_vector("Vector b", 0.75, 0.5)

    # Placeholder output with the same shape/descriptor as the source; the computation fills it in.
    blank_xdata = DataAndMetadata.new_data_and_metadata(
        numpy.zeros_like(source_item.data),
        data_descriptor=source_item.xdata.data_descriptor)
    output_item = self.__api.library.create_data_item_from_data_and_metadata(
        blank_xdata,
        title="Affine Transformed {}".format(source_item.title))

    self.__api.library.create_computation(
        "nion.affine_transform_image",
        inputs={
            "src_data_item": source_item,
            "vector_a": [vec_a],
            "vector_b": [vec_b]
        },
        outputs={"target": output_item})
    window.display_data_item(output_item)
def _run_align_zlp(api: API_1_0.API, window: API_1_0.DocumentWindow, method_id: str, method_name: str):
    """Align the zero-loss peak of the focused data item and add the aligned data (and shifts) to the library."""
    # find the focused data item
    display = window.target_display
    if not (display and display.data_item):
        logging.error("Failed: No data item selected.")
        return
    reference_index = 0
    channel = display._display_item.display_data_channel
    if channel:
        # Using the sequence index as reference only makes sense for "pure" sequences because the index will be
        # interpreted as index in the flattened non-datum axes
        source_xdata = display.data_item.xdata
        if source_xdata.is_sequence and not source_xdata.is_collection:
            reference_index = channel.sequence_index

    def report_progress(row):
        logging.info(f"Processing row {row} (align zlp)")

    selected = display.selected_graphics
    region = selected[0] if selected else None
    aligned_xdata, shift_xdata = align_zlp_xdata(display.data_item.xdata, report_progress,
                                                 method=method_id, roi=region,
                                                 ref_index=reference_index)
    if not aligned_xdata:
        logging.error("Failed: Data is not a sequence or collection of 1D spectra.")
        return
    # create a new data item in the library and set its title.
    if shift_xdata:
        shifts_item = api.library.create_data_item_from_data_and_metadata(shift_xdata)
        shifts_item.title = f"Shifts ({method_name}) " + display.data_item.title
    aligned_item = api.library.create_data_item_from_data_and_metadata(aligned_xdata)
    aligned_item.title = f"Aligned ({method_name}) " + display.data_item.title
    # display the data item.
    window.display_data_item(aligned_item)
def menu_item_execute(self, window: API.DocumentWindow) -> None:
    """Copy affine-transform vectors from an existing computation to the other selected data items.

    Scans the selected display items: the first item that is already the source of a
    "nion.affine_transform_image" computation supplies the template vectors; every other
    selected data item gets copies of those vectors plus its own transform computation.
    """
    document_controller = window._document_controller
    selected_display_items = document_controller.selected_display_items
    data_items = list()
    src_vector_a = None
    src_vector_b = None
    for i, display_item in enumerate(selected_display_items):
        # Use the first data item of the display, if any.
        data_item = display_item.data_items[0] if display_item and len(
            display_item.data_items) > 0 else None
        if data_item and src_vector_a is None:
            # Still looking for the template: check whether this data item already drives
            # an affine-transform computation whose vectors we can reuse.
            for computation in self.__api.library._document_model.computations:
                if computation.processing_id == "nion.affine_transform_image":
                    if computation.get_input("src_data_item") == data_item:
                        src_vector_a = computation.get_input("vector_a")
                        if src_vector_a is not None:
                            # Inputs are stored as lists; wrap the graphic in an API object.
                            src_vector_a = self.__api._new_api_object(
                                src_vector_a[0])
                        src_vector_b = computation.get_input("vector_b")
                        if src_vector_b is not None:
                            src_vector_b = self.__api._new_api_object(
                                src_vector_b[0])
                        break
            else:
                # for/else: no matching computation found, so this item is a target.
                data_items.append(self.__api._new_api_object(data_item))
        elif data_item:
            data_items.append(self.__api._new_api_object(data_item))
    if src_vector_a is None:
        # No template computation among the selection; nothing to copy.
        return
    for data_item in data_items:
        # Recreate both vectors at the template's positions on each target item.
        vector_a = data_item.add_line_region(src_vector_a.start[0],
                                             src_vector_a.start[1],
                                             src_vector_a.end[0],
                                             src_vector_a.end[1])
        vector_a.label = "Vector a"
        vector_a._graphic.end_arrow_enabled = True
        vector_b = data_item.add_line_region(src_vector_b.start[0],
                                             src_vector_b.start[1],
                                             src_vector_b.end[0],
                                             src_vector_b.end[1])
        vector_b.label = "Vector b"
        vector_b._graphic.end_arrow_enabled = True
        # Placeholder output; the computation fills in the transformed data.
        result_data_item = self.__api.library.create_data_item_from_data_and_metadata(
            DataAndMetadata.new_data_and_metadata(
                numpy.zeros_like(data_item.data),
                data_descriptor=data_item.xdata.data_descriptor),
            title="Affine Transformed {}".format(data_item.title))
        self.__api.library.create_computation(
            "nion.affine_transform_image",
            inputs={
                "src_data_item": data_item,
                "vector_a": [vector_a],
                "vector_b": [vector_b]
            },
            outputs={"target": result_data_item})
        window.display_data_item(result_data_item)
def menu_item_execute(self, window: API.DocumentWindow) -> None:
    """Align the zero-loss peak of the target 1-D data item via HyperSpy and display the result."""
    source_item = window.target_data_item
    if not source_item:
        return
    source_xdata = source_item.xdata
    if source_xdata.datum_dimension_count != 1:
        return
    # Round-trip through a HyperSpy EELS signal to use its ZLP alignment.
    signal = nion.hyperspy.xdata_to_signal(source_xdata, copy_data=True)
    signal.set_signal_type('EELS')
    signal.align_zero_loss_peak(print_stats=False)
    aligned_xdata = nion.hyperspy.signal_to_xdata(signal)
    aligned_item = window.library.create_data_item_from_data_and_metadata(
        aligned_xdata)
    window.display_data_item(aligned_item)
def align_multi_si(api: API_1_0.API, window: API_1_0.DocumentWindow):
    """Create the "eels.align_multi_si" computation from the two selected sequence data items.

    The collection-shaped item is treated as the spectrum-image sequence; the other as the
    HAADF (scanned image) sequence supplying the alignment region and reference index.
    """
    sources = window._document_controller._get_two_data_sources()
    error_msg = "Select a sequence of spectrum images and a sequence of scanned images in order to use this computation."
    first = sources[0]
    second = sources[1]
    for source in (first, second):
        assert source[0] is not None, error_msg
        assert source[0].data_item is not None, error_msg
        assert source[0].data_item.is_sequence, error_msg
    if first[0].data_item.is_collection:
        si_item = first[0].data_item
        haadf_display, align_region = second[0], second[1]
    elif second[0].data_item.is_collection:
        si_item = second[0].data_item
        haadf_display, align_region = first[0], first[1]
    else:
        raise ValueError(error_msg)
    haadf_item = haadf_display.data_item
    align_index = haadf_display.display_data_channel.sequence_index
    # Placeholder outputs; the computation replaces their data.
    aligned_haadf = api.library.create_data_item_from_data(
        numpy.zeros((1, 1, 1)), title="Aligned {}".format(haadf_item.title))
    aligned_si = api.library.create_data_item_from_data(
        numpy.zeros((1, 1, 1)), title="Aligned {}".format(si_item.title))
    inputs = {
        "si_sequence_data_item": api._new_api_object(si_item),
        "haadf_sequence_data_item": api._new_api_object(haadf_item),
        "align_index": align_index
    }
    if align_region:
        inputs["align_region"] = api._new_api_object(align_region)
    computation = api.library.create_computation(
        "eels.align_multi_si", inputs=inputs,
        outputs={"aligned_haadf": aligned_haadf, "aligned_si": aligned_si})
    computation._computation.source = aligned_si._data_item
    window.display_data_item(aligned_haadf)
    window.display_data_item(aligned_si)
def acquire_spectrum_image(api: API.API, document_window: API.DocumentWindow) -> None:
    """Acquire a spectrum image by recording a scan synchronized to the EELS camera.

    Configures the scan controller to be externally clocked by the camera exposure,
    creates a library data item to receive the data, then fills it row/column from
    camera frames while the scan record task runs. Fires
    acquisition_state_changed_event with start/update/end messages.

    NOTE(review): this function references `self` and `_` from an enclosing scope —
    it appears to be defined inside a method/closure; confirm before relocating it.

    Fixes: `numpy.float` (removed in NumPy 1.24) replaced by `numpy.float64` (the
    same dtype); bare `except:` made explicit as `except BaseException:`.
    """
    try:
        logging.debug("start")
        self.acquisition_state_changed_event.fire({"message": "start"})
        try:
            eels_camera = api.get_hardware_source_by_id("orca_camera", version="1.0")
            eels_camera_parameters = eels_camera.get_frame_parameters_for_profile_by_index(0)
            scan_controller = api.get_hardware_source_by_id("scan_controller", version="1.0")
            scan_parameters = scan_controller.get_frame_parameters_for_profile_by_index(2)
            # Clamp the scan size and slave the scan timing to the camera exposure.
            scan_max_size = 2048
            scan_parameters["size"] = min(scan_max_size, scan_parameters["size"][0]), min(scan_max_size, scan_parameters["size"][1])
            scan_parameters["pixel_time_us"] = int(1000 * eels_camera_parameters["exposure_ms"] * 0.75)
            scan_parameters["external_clock_wait_time_ms"] = int(eels_camera_parameters["exposure_ms"] * 1.5)
            scan_parameters["external_clock_mode"] = 1
            library = document_window.library
            data_item = library.create_data_item(_("Spectrum Image"))
            document_window.display_data_item(data_item)
            # force the data to be held in memory and write delayed by grabbing a data_ref.
            with library.data_ref_for_data_item(data_item) as data_ref:
                flyback_pixels = 2
                with contextlib.closing(eels_camera.create_view_task(frame_parameters=eels_camera_parameters, buffer_size=16)) as eels_view_task:
                    # wait for a frame, then create the record task during the next frame, then wait for that
                    # frame to finish. that will position the scan at the first position. proceed with acquisition.
                    eels_view_task.grab_next_to_finish()
                    eels_view_task.grab_earliest()  # wait for current frame to finish
                    with contextlib.closing(scan_controller.create_record_task(scan_parameters)) as scan_task:
                        try:
                            scan_height = scan_parameters["size"][0]
                            scan_width = scan_parameters["size"][1] + flyback_pixels
                            data_and_metadata_list = eels_view_task.grab_earliest()
                            eels_data_and_metadata = data_and_metadata_list[1]
                            eels_data = eels_data_and_metadata.data
                            # Frame indices are counted relative to the first grabbed frame.
                            frame_index_base = eels_data_and_metadata.metadata["hardware_source"]["frame_index"]
                            frame_index = eels_data_and_metadata.metadata["hardware_source"]["frame_index"] - frame_index_base
                            while True:
                                if self.__aborted:
                                    scan_task.cancel()
                                    break
                                # Map the linear frame index onto scan coordinates (width includes flyback pixels).
                                column = frame_index % scan_width
                                row = frame_index // scan_width
                                if data_ref.data is None:
                                    # Allocate on first frame: scan shape x spectrum length.
                                    # numpy.float was removed in NumPy 1.24; numpy.float64 is the same dtype.
                                    data_ref.data = numpy.zeros(scan_parameters["size"] + (eels_data.shape[0],), numpy.float64)
                                if row >= scan_height:
                                    break
                                if column < data_ref.data.shape[1]:
                                    data_ref[row, column, :] = eels_data
                                    self.acquisition_state_changed_event.fire({"message": "update", "position": (row, column + flyback_pixels)})
                                data_and_metadata_list = eels_view_task.grab_earliest()
                                eels_data_and_metadata = data_and_metadata_list[1]
                                eels_data = eels_data_and_metadata.data
                                frame_index = eels_data_and_metadata.metadata["hardware_source"]["frame_index"] - frame_index_base
                        except BaseException:
                            # Cancel the scan on any failure (including KeyboardInterrupt), then re-raise.
                            scan_task.cancel()
                            raise
        finally:
            self.acquisition_state_changed_event.fire({"message": "end"})
            logging.debug("end")
    except Exception:
        import traceback
        traceback.print_exc()
def acquire_sequence(api: API.API, document_window: API.DocumentWindow, scan_hardware_source, camera_hardware_source) -> None:
    """Acquire a camera frame sequence synchronized with a scan and import it as a spectrum image.

    Configures the scan to be externally clocked by the camera, runs a camera sequence
    acquisition covering scan_height x scan_width frames (including flyback pixels),
    reshapes the result into scan coordinates, merges calibrations/metadata from the
    scan data, and queues the data-item creation onto the UI thread.

    NOTE(review): references `self` and `sum_frames` from an enclosing scope — this
    function appears to be defined inside a method/closure; confirm before relocating it.
    """
    try:
        logging.debug("start")
        self.acquisition_state_changed_event.fire({"message": "start"})
        try:
            camera_hardware_source_id = camera_hardware_source._hardware_source.hardware_source_id
            camera_frame_parameters = camera_hardware_source.get_frame_parameters_for_profile_by_index(
                0)
            if sum_frames:
                # Sum over the spectrum's non-energy axis on the camera side.
                camera_frame_parameters["processing"] = "sum_project"
            scan_frame_parameters = scan_hardware_source.get_frame_parameters_for_profile_by_index(
                2)
            # Clamp the total scan area; shrink the height to keep the requested width.
            scan_max_area = 2048 * 2048
            scan_param_height = int(scan_frame_parameters["size"][0])
            scan_param_width = int(scan_frame_parameters["size"][1])
            if scan_param_height * scan_param_width > scan_max_area:
                scan_param_height = scan_max_area // scan_param_width
            scan_frame_parameters[
                "size"] = scan_param_height, scan_param_width
            scan_frame_parameters["pixel_time_us"] = int(
                1000 * camera_frame_parameters["exposure_ms"] * 0.75)
            # long timeout is needed until memory allocation is outside of the acquire_sequence call.
            scan_frame_parameters[
                "external_clock_wait_time_ms"] = 20000  # int(camera_frame_parameters["exposure_ms"] * 1.5)
            scan_frame_parameters["external_clock_mode"] = 1
            scan_frame_parameters["ac_line_sync"] = False
            scan_frame_parameters["ac_frame_sync"] = False
            flyback_pixels = scan_hardware_source._hardware_source.flyback_pixels  # using internal API
            # Each scan row includes flyback pixels that are trimmed after acquisition.
            scan_height = scan_param_height
            scan_width = scan_param_width + flyback_pixels
            library = document_window.library
            camera_hardware_source._hardware_source.set_current_frame_parameters(
                camera_hardware_source._hardware_source.
                get_frame_parameters_from_dict(camera_frame_parameters))
            camera_hardware_source._hardware_source.acquire_sequence_prepare(
                scan_width * scan_height)
            with contextlib.closing(
                    scan_hardware_source.create_record_task(
                        scan_frame_parameters)) as scan_task:
                data_elements = camera_hardware_source._hardware_source.acquire_sequence(
                    scan_width * scan_height)
                data_element = data_elements[0]
                # the data_element['data'] ndarray may point to low level memory; we need to get it to disk
                # quickly. see note below.
                scan_data_list = scan_task.grab()
                data_shape = data_element["data"].shape
                # Reshape the flat frame sequence into (rows, columns, ...) and drop flyback columns.
                if flyback_pixels > 0:
                    data_element["data"] = data_element["data"].reshape(
                        scan_height, scan_width,
                        *data_shape[1:])[:, flyback_pixels:scan_width, :]
                else:
                    data_element["data"] = data_element[
                        "data"].reshape(scan_height, scan_width,
                                        *data_shape[1:])
                # Collection (scan) calibrations come from the scan data, when available.
                if len(scan_data_list) > 0:
                    collection_calibrations = [
                        calibration.write_dict() for calibration in
                        scan_data_list[0].dimensional_calibrations
                    ]
                    scan_properties = scan_data_list[0].metadata
                else:
                    collection_calibrations = [{}, {}]
                    scan_properties = {}
                # Datum calibrations come from the camera element, skipping the sequence axis.
                if "spatial_calibrations" in data_element:
                    datum_calibrations = [
                        copy.deepcopy(spatial_calibration) for spatial_calibration
                        in data_element["spatial_calibrations"][1:]
                    ]
                else:
                    datum_calibrations = [{} for i in range(
                        len(data_element["data"].shape) - 2)]
                # combine the dimensional calibrations from the scan data with the datum dimensions calibration from the sequence
                data_element["collection_dimension_count"] = 2
                data_element[
                    "spatial_calibrations"] = collection_calibrations + datum_calibrations
                data_element.setdefault(
                    "metadata", dict())["scan_detector"] = scan_properties.get(
                        "hardware_source", dict())
                data_and_metadata = ImportExportManager.convert_data_element_to_data_and_metadata(
                    data_element)

                def create_and_display_data_item():
                    # Runs on the UI thread (queued below).
                    data_item = library.get_data_item_for_hardware_source(
                        scan_hardware_source,
                        channel_id=camera_hardware_source_id,
                        processor_id="summed",
                        create_if_needed=True,
                        large_format=True)
                    data_item.title = _("Spectrum Image {}".format(
                        " x ".join([
                            str(d)
                            for d in data_and_metadata.dimensional_shape
                        ])))
                    # the data item should not have any other 'clients' at this point; so setting the
                    # data and metadata will immediately unload the data (and write to disk). this is important,
                    # because the data (up to this point) can be shared data from the DLL.
                    data_item.set_data_and_metadata(data_and_metadata)
                    # assert not data_item._data_item.is_data_loaded
                    # now to display it will reload the data (presumably from an HDF5 or similar on-demand format).
                    document_window.display_data_item(data_item)
                    # Also import each scan channel recorded alongside the camera sequence.
                    for scan_data_and_metadata in scan_data_list:
                        scan_channel_id = scan_data_and_metadata.metadata[
                            "hardware_source"]["channel_id"]
                        scan_channel_name = scan_data_and_metadata.metadata[
                            "hardware_source"]["channel_name"]
                        channel_id = camera_hardware_source_id + "_" + scan_channel_id
                        data_item = library.get_data_item_for_hardware_source(
                            scan_hardware_source,
                            channel_id=channel_id,
                            create_if_needed=True)
                        data_item.title = "{} ({})".format(
                            _("Spectrum Image"), scan_channel_name)
                        data_item.set_data_and_metadata(
                            scan_data_and_metadata)
                        document_window.display_data_item(data_item)

                document_window.queue_task(create_and_display_data_item
                                           )  # must occur on UI thread
        finally:
            self.acquisition_state_changed_event.fire(
                {"message": "end"})
            logging.debug("end")
    except Exception as e:
        import traceback
        traceback.print_exc()
def measure_temperature(api: API.API, window: API.DocumentWindow):
    """Set up the "eels.measure_temperature" computation from two selected EEL spectra."""
    sources = window._document_controller._get_two_data_sources()
    document_model = window._document_controller.document_model
    error_msg = "Select two data items each containing one EEL spectrum in order to use this computation."
    assert sources[0][0] is not None, error_msg
    assert sources[1][0] is not None, error_msg
    assert sources[0][0].data_item is not None, error_msg
    assert sources[1][0].data_item is not None, error_msg
    assert sources[0][0].data_item.is_data_1d, error_msg
    assert sources[1][0].data_item.is_data_1d, error_msg
    # First find out which data item is near and which is far. Far should have the higher maximum.
    item_0 = sources[0][0].data_item
    item_1 = sources[1][0].data_item
    if np.amax(item_0.data) > np.amax(item_1.data):
        far_data_item, near_data_item = item_0, item_1
    else:
        far_data_item, near_data_item = item_1, item_0
    # Now we need to calculate the difference and display it so that we have a place to put the interval on
    difference_xdata = near_data_item.xdata - far_data_item.xdata
    difference_data_item = api.library.create_data_item_from_data_and_metadata(
        difference_xdata,
        title=
        f"Difference (Near - Far), ({near_data_item.title} - {far_data_item.title})"
    )
    window.display_data_item(difference_data_item)
    calibration = difference_xdata.dimensional_calibrations[0]
    num_channels = len(difference_xdata.data)
    # Create the default interval from 20 meV to 100 meV
    graphic = difference_data_item.add_interval_region(
        calibration.convert_from_calibrated_value(0.02) / num_channels,
        calibration.convert_from_calibrated_value(0.1) / num_channels)
    gain_data_item = api.library.create_data_item(title="Gain")
    gain_fit_data_item = api.library.create_data_item(title="Gain Fit")
    # Create the computation
    api.library.create_computation(
        "eels.measure_temperature",
        inputs={
            "near_data_item": api._new_api_object(near_data_item),
            "far_data_item": api._new_api_object(far_data_item),
            "fit_interval_graphic": graphic
        },
        outputs={
            "gain_fit_data_item": gain_fit_data_item,
            "gain_data_item": gain_data_item,
            "difference_data_item": difference_data_item
        })
    # Set up the plot of Gain and Fit
    window.display_data_item(gain_data_item)
    window.display_data_item(gain_fit_data_item)
    fit_display = document_model.get_display_item_for_data_item(
        gain_fit_data_item._data_item)
    fit_display.append_display_data_channel_for_data_item(
        gain_data_item._data_item)
    fit_display._set_display_layer_properties(
        0, label=_("Fit"), fill_color=None, stroke_color="#F00")
    fit_display._set_display_layer_properties(
        1, label=_("Gain"), fill_color="#1E90FF")
    fit_display.set_display_property("legend_position", "top-right")
    fit_display.title = "Temperature Measurement Fit"
def align_multi_si(api: API_1_0.API, window: API_1_0.DocumentWindow):
    """Set up the "nion.align_multi_d_sequence" computation for the two selected sequences.

    Decides which selected data item is the HAADF (scanned image) sequence and which is
    the spectrum-image sequence by scoring each against the expected HAADF "footprint"
    (2-d datum, is a sequence, no collection dimensions, recorded by the superscan),
    then creates placeholder outputs and the computation connecting them.

    Fixes: metadata key was misspelled "harwdare_source_id" (so the superscan check could
    never match); leftover debug print() calls replaced with logging.debug.
    """
    selected_display_items = window._document_controller._get_two_data_sources()
    error_msg = "Select a sequence of spectrum images and a sequence of scanned images in order to use this computation."
    assert selected_display_items[0][0] is not None, error_msg
    assert selected_display_items[1][0] is not None, error_msg
    assert selected_display_items[0][0].data_item is not None, error_msg
    assert selected_display_items[1][0].data_item is not None, error_msg
    assert selected_display_items[0][0].data_item.is_sequence, error_msg
    assert selected_display_items[1][0].data_item.is_sequence, error_msg
    di_1 = selected_display_items[0][0].data_item
    di_2 = selected_display_items[1][0].data_item
    # Expected footprint of a HAADF sequence.
    haadf_footprint = (2, True, 0, True)

    def _footprint(di):
        # Describe a data item in the same terms as haadf_footprint.
        return (di.datum_dimension_count, di.is_sequence, di.collection_dimension_count,
                di.metadata.get("hardware_source", {}).get("hardware_source_id", "") == "superscan")

    di_1_footprint = _footprint(di_1)
    di_2_footprint = _footprint(di_2)
    # Score each item by the (negated) element-wise distance from the HAADF footprint;
    # the higher score is the better HAADF candidate.
    di_1_points = -sum(abs(h - f) for h, f in zip(haadf_footprint, di_1_footprint))
    di_2_points = -sum(abs(h - f) for h, f in zip(haadf_footprint, di_2_footprint))
    logging.debug("footprints %s %s, points %s %s",
                  di_1_footprint, di_2_footprint, di_1_points, di_2_points)
    if di_1_points > di_2_points:
        assert di_1_footprint[:-1] == haadf_footprint[:-1], error_msg
        haadf_sequence_data_item = api._new_api_object(di_1)
        si_sequence_data_item = api._new_api_object(di_2)
    elif di_2_points > di_1_points:
        assert di_2_footprint[:-1] == haadf_footprint[:-1], error_msg
        haadf_sequence_data_item = api._new_api_object(di_2)
        si_sequence_data_item = api._new_api_object(di_1)
    else:
        # A tie means we cannot tell the two items apart.
        raise ValueError(error_msg)
    # Reuse an existing rectangle on the HAADF item as the alignment bounds, else add one.
    align_region = None
    for graphic in haadf_sequence_data_item.graphics:
        if graphic.graphic_type == 'rect-graphic':
            align_region = graphic
            break
    if align_region is None:
        align_region = haadf_sequence_data_item.add_rectangle_region(
            0.5, 0.5, 0.75, 0.75)
    align_region.label = 'Alignment bounds'
    align_index = haadf_sequence_data_item.display._display.display_data_channel.sequence_index
    # Placeholder outputs; the computation fills in the real data.
    aligned_haadf = api.library.create_data_item_from_data(
        numpy.zeros((1, 1, 1)),
        title="Aligned {}".format(haadf_sequence_data_item.title))
    aligned_si = api.library.create_data_item_from_data(
        numpy.zeros((1, 1, 1)),
        title="Aligned {}".format(si_sequence_data_item.title))
    inputs = {
        "si_sequence_data_item": si_sequence_data_item,
        "haadf_sequence_data_item": haadf_sequence_data_item,
        "align_index": align_index,
        "align_region": align_region
    }
    computation = api.library.create_computation("nion.align_multi_d_sequence",
                                                 inputs=inputs,
                                                 outputs={
                                                     "aligned_haadf": aligned_haadf,
                                                     "aligned_si": aligned_si
                                                 })
    computation._computation.source = aligned_si._data_item
    window.display_data_item(aligned_haadf)
    window.display_data_item(aligned_si)
def calibrate_spectrum(api: API_1_0.API, window: API_1_0.DocumentWindow):
    """Open an interactive dialog for calibrating the energy axis of the target spectrum.

    Estimates the ZLP position, places an "Offset Point" graphic there plus a "Scale Point"
    graphic, and shows a modeless dialog in which the user can drag the graphics or type
    energies to set the calibration's offset and scale. Returns the dialog (for testing).
    """

    class UIHandler:
        """Declarative UI handler: keeps the two graphics and the calibration in sync."""

        def __init__(self, data_item: API_1_0.DataItem, src_data_item: API_1_0.DataItem, offset_graphic: API_1_0.Graphic, second_graphic: API_1_0.Graphic, units='eV'):
            self.ev_converter = Converter.PhysicalValueToStringConverter(units)
            self.property_changed_event = Event.Event()
            self.__data_item = data_item
            self.__src_data_item = src_data_item
            self.__offset_graphic = offset_graphic
            self.__second_graphic = second_graphic
            self.__offset_energy = 0
            # Guard flag: suppresses graphic-changed callbacks while we reposition graphics ourselves.
            self.__graphic_updating = False
            # Initial "scale point" energy from the second graphic's current position.
            self.__second_point = data_item.display_xdata.dimensional_calibrations[0].convert_to_calibrated_value(second_graphic.position * len(data_item.display_xdata.data))
            self.__offset_changed_listener = offset_graphic._graphic.property_changed_event.listen(self.__offset_graphic_changed)
            self.__second_changed_listener = second_graphic._graphic.property_changed_event.listen(self.__second_graphic_changed)

        def close(self):
            # Release listeners and drop references so the handler can be garbage collected.
            self.__offset_changed_listener.close()
            self.__offset_changed_listener = None
            self.__second_changed_listener.close()
            self.__second_changed_listener = None
            self.__data_item = None
            self.__src_data_item = None
            self.__second_graphic = None
            self.__offset_graphic = None

        @property
        def offset_energy(self):
            return self.__offset_energy

        @offset_energy.setter
        def offset_energy(self, offset_energy):
            # Typing an offset energy keeps the current scale and shifts the axis.
            self.__offset_energy = offset_energy
            self.property_changed_event.fire("offset_energy")
            self.__update_calibration(keep_scale=True)

        @property
        def second_point(self):
            return self.__second_point

        @second_point.setter
        def second_point(self, energy):
            # Typing a scale-point energy recomputes the scale from the two graphics.
            self.__second_point = energy
            self.property_changed_event.fire("second_point")
            self.__update_calibration()

        @contextlib.contextmanager
        def __lock_graphic_updates(self):
            # Context manager that sets the guard flag for the duration of the block.
            self.__graphic_updating = True
            try:
                yield self.__graphic_updating
            finally:
                self.__graphic_updating = False

        def __update_calibration(self, keep_scale=False):
            """Recompute scale/offset from the graphics and write them to the source data item."""
            dimensional_calibrations = copy.deepcopy(self.__data_item.display_xdata.dimensional_calibrations)
            energy_calibration = dimensional_calibrations[0]
            if keep_scale:
                scale = energy_calibration.scale
            else:
                # Scale from the energy difference between the two graphics divided by their pixel distance.
                scale = (self.__second_point - self.__offset_energy) / ((self.__second_graphic.position - self.__offset_graphic.position) * len(self.__data_item.display_xdata.data))
            offset = self.__offset_energy - self.__offset_graphic.position * len(self.__data_item.display_xdata.data) * scale
            energy_calibration.scale = scale
            energy_calibration.offset = offset
            # Apply to the last (energy) axis of the source data item.
            dimensional_calibrations = list(self.__src_data_item.xdata.dimensional_calibrations)
            dimensional_calibrations[-1] = energy_calibration
            self.__src_data_item.set_dimensional_calibrations(dimensional_calibrations)
            # Reposition both graphics to match the new calibration, clamped to [0, 0.99].
            offset_graphic_position = (self.offset_energy - offset) / scale / len(self.__data_item.display_xdata.data)
            second_graphic_position = (self.second_point - offset) / scale / len(self.__data_item.display_xdata.data)
            with self.__lock_graphic_updates():
                self.__offset_graphic.position = min(max(0, offset_graphic_position), 0.99)
                self.__second_graphic.position = min(max(0, second_graphic_position), 0.99)

        def __offset_graphic_changed(self, property_name):
            # Dragging the offset graphic shifts the axis but keeps the scale.
            if not self.__graphic_updating:
                self.__update_calibration(keep_scale=True)

        def __second_graphic_changed(self, property_name):
            # Dragging the scale graphic recomputes the scale.
            if not self.__graphic_updating:
                self.__update_calibration()

    # Build the dialog layout: instructions plus two bound energy fields.
    ui = Declarative.DeclarativeUI()
    row_1 = ui.create_row(ui.create_label(text="Move the graphics in the spectrum and/or change the numbers\nin the fields below to change the calibration.\n"
                                               "The offset graphic will be positioned on the ZLP if possible."),
                          margin=5, spacing=5)
    row_2 = ui.create_row(ui.create_label(text="Offset Point energy"),
                          ui.create_line_edit(text="@binding(offset_energy, converter=ev_converter)"),
                          ui.create_stretch(), margin=5, spacing=5)
    row_3 = ui.create_row(ui.create_label(text="Scale Point energy"),
                          ui.create_line_edit(text="@binding(second_point, converter=ev_converter)"),
                          ui.create_stretch(), margin=5, spacing=5)
    column = ui.create_column(row_1, row_2, row_3, ui.create_stretch(), margin=5, spacing=5)
    data_item: API_1_0.DataItem = window.target_data_item

    class DummyHandler:
        # Placeholder handler for the informational error dialogs below.
        ...

    # Only 1-D (or sequence-of-1-D) non-live data can be calibrated; otherwise explain and bail out.
    if data_item is None or data_item.xdata is None or not data_item.display_xdata.is_data_1d:
        window.show_modeless_dialog(ui.create_modeless_dialog(ui.create_label(text=("This tool cannot be used for the selected type of data.\n"
                                                                                    "To use it you have to select a data item containing 1-D data or a sequence of 1-D data.")),
                                                              title="Calibrate Spectrum", margin=10), handler=DummyHandler)
        return
    if data_item._data_item.is_live:
        window.show_modeless_dialog(ui.create_modeless_dialog(ui.create_label(text=("This tool cannot be used on live data.\n"
                                                                                    "To use it you have to select a data item containing 1-D data or a sequence of 1-D data.")),
                                                              title="Calibrate Spectrum", margin=10), handler=DummyHandler)
        return
    # This is the data item we will update the calibrations on. If the selected data item is the result of a pick
    # computation we will update the source SI. Otherwise we just update the spectrum itself.
    src_data_item = data_item
    for computation in api.library._document_model.computations:
        if computation.processing_id in {"pick-point", "pick-mask-average", "pick-mask-sum"}:
            if computation.get_output("target") == data_item._data_item:
                input_ = computation.get_input("src")
                # If input_ is a "DataSource" we need to get the actual data item
                if hasattr(input_, "data_item"):
                    input_ = input_.data_item
                src_data_item = api._new_api_object(input_)
    # Estimate the ZLP position: spline fit first, then center-of-mass, then plain argmax.
    # NOTE(review): the `is numpy.nan` identity checks below only detect the sentinel assigned
    # above; a NaN *value* returned by the estimators would not compare identical. Confirm the
    # estimators return the numpy.nan singleton on failure.
    mx_pos = numpy.nan
    try:
        mx_pos = ZLP_Analysis.estimate_zlp_amplitude_position_width_fit_spline(data_item.display_xdata.data)[1]
    except TypeError:
        pass
    # fallback to com if fit failed
    if mx_pos is numpy.nan:
        mx_pos = ZLP_Analysis.estimate_zlp_amplitude_position_width_com(data_item.display_xdata.data)[1]
    # fallback to simple max if everything else failed
    if mx_pos is numpy.nan:
        mx_pos = float(numpy.argmax(data_item.display_xdata.data))
    # We need to move the detected maximum by half a pixel because we want to center the calibration and the graphic
    # on the pixel center but the maximum is calculated for the left edge.
    mx_pos += 0.5
    # Zero the energy axis at the detected ZLP and write the calibration to the source item.
    dimensional_calibrations = list(data_item.display_xdata.dimensional_calibrations)
    energy_calibration = dimensional_calibrations[0]
    energy_calibration.offset = -mx_pos * energy_calibration.scale
    dimensional_calibrations = list(src_data_item.xdata.dimensional_calibrations)
    dimensional_calibrations[-1] = energy_calibration
    src_data_item.set_dimensional_calibrations(dimensional_calibrations)
    # Place the offset graphic on the ZLP and the scale graphic halfway to the right edge.
    offset_graphic = data_item.add_channel_region(mx_pos / len(data_item.display_xdata.data))
    offset_graphic.label = "Offset Point"
    offset_graphic._graphic.color = "#CE00AC"
    second_graphic = data_item.add_channel_region((offset_graphic.position + 1.0) * 0.5)
    second_graphic.label = "Scale Point"
    second_graphic._graphic.color = "#CE00AC"
    handler = UIHandler(data_item, src_data_item, offset_graphic, second_graphic, units=energy_calibration.units)
    dialog = typing.cast(Dialog.ActionDialog,
                         Declarative.construct(window._document_controller.ui, window._document_controller,
                                               ui.create_modeless_dialog(column, title="Calibrate Spectrum"), handler))

    def wc(w):
        # Window-close callback: remove the helper graphics and detach this listener.
        data_item.remove_region(offset_graphic)
        data_item.remove_region(second_graphic)
        getattr(handler, "configuration_dialog_close_listener").close()
        delattr(handler, "configuration_dialog_close_listener")

    # use set handler to pass type checking.
    setattr(handler, "configuration_dialog_close_listener", dialog._window_close_event.listen(wc))
    dialog.show()
    # Return the dialog which is useful for testing
    return dialog