Code example #1
0
File: Registry_test.py  Project: icbicket/nionutils
 def test_registering_same_component_with_different_types_succeeds(self):
     """A component may be registered under several types and unregistered per type."""
     component = object()
     # Register under two type names, then add a third via a second call.
     Registry.register_component(component, {"one", "two"})
     Registry.register_component(component, {"three"})
     self.assertEqual(component, Registry.get_component("one"))
     self.assertEqual(component, Registry.get_component("three"))
     # Removing one type leaves the others registered.
     Registry.unregister_component(component, {"three"})
     self.assertEqual(component, Registry.get_component("one"))
     self.assertIsNone(Registry.get_component("three"))
     # Unregistering without a type set removes the component entirely.
     Registry.unregister_component(component)
     self.assertIsNone(Registry.get_component("one"))
 def set_default(self):
     """Start the ronchigram camera with stored parameters and restore default aberrations.

     Reads ``self.binning``, ``self.exposure_ms``, ``self.abr_list`` and
     ``self.default``; requires a registered "stem_controller" component.
     """
     # Connect to ronchigram camera and setup camera parameters
     stem_controller = Registry.get_component("stem_controller")
     ronchigram = stem_controller.ronchigram_camera
     frame_parameters = ronchigram.get_current_frame_parameters()
     frame_parameters["binning"] = self.binning
     frame_parameters["exposure_ms"] = self.exposure_ms
     ronchigram.start_playing(frame_parameters)
     # Set each aberration coefficient back to its stored default value.
     # (The original fetched the stem controller a second time and kept a
     # manual index counter; zip pairs each coefficient with its default.)
     for abr_coeff, default_value in zip(self.abr_list, self.default):
         stem_controller.SetVal(abr_coeff, default_value)
Code example #3
0
 def _update_collection_index(axis, value):
     """Re-pick the displayed frame after one collection (navigation) index changed.

     axis: navigation axis to change (0 selects axis 0; any other value is
     treated as axis 1).  value: new index on that axis.
     Operates on the closed-over ``src``/``target`` data items; no-op when
     the libertem-io metadata or the libertem executor is missing, or when
     the index did not actually change.
     """
     libertem_metadata = copy.deepcopy(src.metadata.get('libertem-io'))
     if not libertem_metadata:
         return
     file_parameters = libertem_metadata['file_parameters']
     file_type = file_parameters.pop('type')
     # The display-slice start is stored flat; unravel it into per-axis
     # indices using the target's data shape.
     current_index = libertem_metadata['display_slice']['start']
     current_index = np.unravel_index(current_index, target.data.shape)
     if value == current_index[axis]:
         return
     executor = Registry.get_component('libertem_executor')
     if not executor:
         return
     executor = executor.ensure_sync()
     ds = dataset.load(file_type, executor, **file_parameters)
     # Boolean ROI selecting exactly the newly requested frame.
     # NOTE(review): the indexing assumes a 2D navigation shape — confirm.
     roi = np.zeros(ds.shape.nav, dtype=bool)
     if axis == 0:
         roi[value, current_index[1]] = True
         current_index = (value, current_index[1])
     else:
         roi[current_index[0], value] = True
         current_index = (current_index[0], value)
     result = UDFRunner(PickUDF()).run_for_dataset(ds,
                                                   executor,
                                                   roi=roi)
     result_array = np.squeeze(np.array(result['intensity']))
     # Write the new flattened display-slice start back into a metadata copy
     # and replace the source item's data with the newly picked frame.
     new_metadata = copy.deepcopy(src.metadata)
     new_display_slice = np.ravel_multi_index(current_index,
                                              target.data.shape)
     new_metadata['libertem-io']['display_slice'][
         'start'] = new_display_slice
     new_xdata = self.__api.create_data_and_metadata(
         result_array, metadata=new_metadata)
     src.set_data_and_metadata(new_xdata)
Code example #4
0
def energy_diff_cross_section_nm2_per_ev(
        atomic_number: int, shell_number: int, subshell_index: int,
        edge_onset_ev: float, edge_delta_ev: float, beam_energy_ev: float,
        convergence_angle_rad: float,
        collection_angle_rad: float) -> numpy.ndarray:
    """Return the energy differential cross section for the specified electron shell and experimental parameters.

    The returned differential cross-section value is in units of nm * nm / eV.
    """
    params = dict(
        atomic_number=atomic_number,
        shell_number=shell_number,
        subshell_index=subshell_index,
        edge_onset_ev=edge_onset_ev,
        edge_delta_ev=edge_delta_ev,
        beam_energy_ev=beam_energy_ev,
        convergence_angle_rad=convergence_angle_rad,
        collection_angle_rad=collection_angle_rad)
    # Prefer a registered analysis service when it offers this computation.
    # hasattr(None, ...) is False, so a missing service falls through safely.
    service = Registry.get_component("eels_analysis_service")
    sigma = None
    if hasattr(service, "energy_diff_cross_section_nm2_per_ev"):
        sigma = service.energy_diff_cross_section_nm2_per_ev(**params)
    # Built-in fallback supports K edges only (shell 1, subshell 1).
    if sigma is None and shell_number == 1 and subshell_index == 1:
        sigma = EELS_CrossSections.energy_diff_cross_section_nm2_per_ev(
            atomic_number, shell_number, subshell_index, edge_onset_ev,
            edge_delta_ev, beam_energy_ev, convergence_angle_rad,
            collection_angle_rad)
    return sigma
Code example #5
0
File: eels_analysis.py  Project: jjkas/eels-analysis
def partial_cross_section_nm2(atomic_number: int, shell_number: int, subshell_index: int,
                              edge_onset_ev: float, edge_delta_ev: float, beam_energy_ev: float,
                              convergence_angle_rad: float, collection_angle_rad: float) -> float:
    """Return the partial cross section plus the differential data used to compute it.

    Returns a tuple ``(cross_section, energy_diff_sigma, egrid_ev)``; the
    cross-section units are nm * nm.  The last two entries are None when the
    value was supplied by a registered analysis service.
    """
    cross_section = None
    # Fix: initialize the returned arrays so the final return statement does
    # not raise NameError when the analysis service supplies the value.
    energy_diff_sigma2 = None
    egrid_ev2 = None
    # hasattr(None, ...) is False, so a missing service falls through safely.
    eels_analysis_service = Registry.get_component("eels_analysis_service")
    if cross_section is None and hasattr(eels_analysis_service, "partial_cross_section_nm2"):
        cross_section = eels_analysis_service.partial_cross_section_nm2(atomic_number=atomic_number,
                                                                        shell_number=shell_number,
                                                                        subshell_index=subshell_index,
                                                                        edge_onset_ev=edge_onset_ev,
                                                                        edge_delta_ev=edge_delta_ev,
                                                                        beam_energy_ev=beam_energy_ev,
                                                                        convergence_angle_rad=convergence_angle_rad,
                                                                        collection_angle_rad=collection_angle_rad)

    if cross_section is None:
        energy_diff_sigma, egrid_ev, onset_thy = energy_diff_cross_section_nm2_per_ev(atomic_number=atomic_number,
                                                                 shell_number=shell_number,
                                                                 subshell_index=subshell_index,
                                                                 edge_onset_ev=edge_onset_ev,
                                                                 edge_delta_ev=edge_delta_ev,
                                                                 beam_energy_ev=beam_energy_ev,
                                                                 convergence_angle_rad=convergence_angle_rad,
                                                                 collection_angle_rad=collection_angle_rad)

        # Integrate over the energy window to get the partial cross-section.
        if len(energy_diff_sigma) > 0:
            # Assumes a uniform energy grid; use the first interval as step.
            energy_step = egrid_ev[1] - egrid_ev[0]
            # Fix: both arrays now use the same inclusive window mask; the
            # grid previously used a strict < and could be one element short
            # relative to the sigma array.
            window = egrid_ev <= edge_onset_ev + edge_delta_ev
            energy_diff_sigma2 = energy_diff_sigma[window]
            egrid_ev2 = egrid_ev[window]
            cross_section = numpy.trapz(energy_diff_sigma2, dx=energy_step)
        else:
            energy_diff_sigma2 = energy_diff_sigma
            egrid_ev2 = egrid_ev
            cross_section = 0.0

    if cross_section is None and atomic_number == 32 and shell_number == 2 and subshell_index == 3:
        # Special values for testing.  NOTE(review): unreachable as written —
        # the branch above always assigns cross_section; kept defensively.
        if abs(edge_delta_ev - 100) < 3:
            cross_section = 7.31e-8
        elif abs(edge_delta_ev - 120) < 3:
            cross_section = 8.79e-8
        elif abs(edge_delta_ev - 200) < 3:
            cross_section = 1.40e-7

    return cross_section, energy_diff_sigma2, egrid_ev2
 def acquire(self, number_frames, energy_offset, sleep_time):
     """Configure the workspace and start a threaded EELS acquire-and-sum run."""
     camera = self.__eels_camera_choice.hardware_source
     if not camera:
         return
     # Set up the workspace layout for acquisition.
     self.__configure_start_workspace(
         self.document_controller.workspace_controller,
         camera.hardware_source_id)
     # Start the EELS camera, then hand off to the acquisition controller.
     camera.start_playing()
     controller = Registry.get_component("stem_controller")
     AcquireController().start_threaded_acquire_and_sum(
         controller, camera, number_frames, energy_offset, sleep_time,
         self.document_controller, functools.partial(self.set_final_layout))
Code example #7
0
 def __init__(self, api):
     """Set up panel identity, placement, and the knob/aberration tables."""
     self.__api = api
     # Panel identity and placement.
     self.panel_id = 'Correct-High-Orders-Panel'
     self.panel_name = _('Correct High Orders')
     self.panel_positions = ['left', 'right']
     self.panel_position = 'right'
     # Correction tables keyed by aberration order: hexapole knob names,
     # the update command, and the aberration-coefficient prefixes.
     self.settings = {'order': 'C4s'}
     self.knobs = {'C4s': ['6Sa', '6Sb', '11Sa', '11Sb', '16Sa', '16Sb']}
     self.update_knobs = {'C4s': 'UpdateC4'}
     self.aberrations = {'C4s': ['C41.', 'C43.', 'C45.']}
     self.__excitations = {}
     # Resolve the STEM controller once at construction time.
     self.stem_controller = Registry.get_component('stem_controller')
Code example #8
0
 def close(self) -> None:
     """Tear down the simulated acquisition environment in reverse setup order."""
     # Flush pending UI tasks before closing the document controller.
     self.document_controller.periodic()
     self.document_controller.close()
     # Close everything registered on the exit stack.
     for ex in self.__exit_stack:
         ex.close()
     stem_controller.unregister_event_loop()
     self.camera_hardware_source.close()
     HardwareSource.HardwareSourceManager().unregister_hardware_source(
         self.camera_hardware_source)
     # Stop device- and module-level services.
     ScanDevice.stop()
     scan_base.stop()
     # Remove the stem controller component registered during setup.
     Registry.unregister_component(
         Registry.get_component("stem_controller"), {"stem_controller"})
     HardwareSource.HardwareSourceManager()._close_instruments()
     HardwareSource.stop()
     super().close()
Code example #9
0
File: __init__.py  Project: tcpekin/libertem-hackaton
    def read_data_and_metadata_from_stream(self, stream):
        """Load a dataset via LiberTEM from *stream* and return the first frame as data-and-metadata.

        Shows a file-parameter dialog, blocks until it has been shown and
        closed, then picks the frame at navigation index 0.  Returns None
        when no libertem executor is registered.
        """
        executor = Registry.get_component('libertem_executor')
        if executor is None:
            logging.error(
                'No libertem executor could be retrieved from the Registry.')
            return
        executor = executor.ensure_sync()
        # Detect the file type; fall back to 'raw' with the stream as path.
        file_parameters = dataset.detect(stream, executor=executor)
        file_type = file_parameters.pop('type', None)
        if file_type is None:
            file_type = 'raw'
            file_parameters = {'path': stream}
        file_params = dict()

        def params_callback(file_params_):
            # Collect the parameters entered in the dialog.
            file_params.update(file_params_)

        # Show the parameter dialog on the UI thread and block this thread
        # until it has been presented and then closed.
        self.__api.queue_task(
            lambda: self.show_file_param_dialog(file_type, params_callback))
        self.__show_file_param_dialog_finished_event.wait()
        self.__show_file_param_dialog_finished_event.clear()
        self.__file_param_dialog_closed_event.wait()
        # 'name' is dialog-only; merge the rest into the detected parameters.
        file_params.pop('name', None)
        file_parameters.update(file_params)

        # Pick a single frame (flat navigation index 0) through a boolean ROI.
        ds = dataset.load(file_type, executor, **file_parameters)
        roi = np.zeros(ds.shape.nav, dtype=bool)
        roi_flat = roi.ravel()
        roi_flat[0] = True
        result = UDFRunner(PickUDF()).run_for_dataset(ds, executor, roi=roi)
        result_array = np.squeeze(np.array(result['intensity']))
        # Remember the file parameters and the displayed slice so the
        # collection index can be changed later from this metadata.
        file_parameters['type'] = file_type
        metadata = {
            'libertem-io': {
                'file_parameters': file_parameters,
                'display_slice': {
                    'start': 0,
                    'stop': 0
                }
            }
        }
        return self.__api.create_data_and_metadata(result_array,
                                                   metadata=metadata)
    def acquire_series(self, abr_coeff, abr_range, nsteps):
        """Acquire a ronchigram image series while sweeping one aberration coefficient.

        Args:
            abr_coeff: name of the aberration coefficient to vary.
            abr_range: total range of the aberration sweep, in m.
            nsteps: number of steps across the range.

        The acquired stack is saved under ``self.path`` as a .npy file and
        the coefficient is restored to its default value afterwards.
        """
        self.abr_coeff = abr_coeff
        self.abr_range = abr_range
        self.nsteps = nsteps
        default_val = self.default[self.abr_list.index(abr_coeff)]

        # Sweep values centered on the default value.
        value_list = [
            (i - self.nsteps // 2) * self.abr_range / self.nsteps + default_val
            for i in range(self.nsteps)
        ]
        self.image_stack = []
        # Connect to stem controller to set up the aberration.
        stem_controller = Registry.get_component("stem_controller")
        success, _ = stem_controller.TryGetVal(abr_coeff)
        print(success)
        ronchigram = stem_controller.ronchigram_camera
        # Acquire each value self.rep times, each frame in a separate thread.
        for value in value_list:
            for _ in range(self.rep):
                if stem_controller.SetVal(self.abr_coeff, value):
                    # Bug fix: the original passed self.acquire_frame(ronchigram)
                    # as the thread target, which invoked it synchronously and
                    # gave the thread a non-callable (None) target.
                    threading.Thread(target=self.acquire_frame,
                                     args=(ronchigram,)).start()
                    print(self.abr_coeff + ' ' + str(value))
        # After acquisition, set the value back to the default number.
        stem_controller.SetVal(
            self.abr_coeff, self.default[self.abr_list.index(self.abr_coeff)])

        # Save the acquired image stack.
        image_stack_array = np.asarray(self.image_stack)
        filename = (f"{self.abr_coeff}_{abr_range}m_{self.nsteps}steps_"
                    f"{self.exposure_ms}ms_bin{self.binning}"
                    f"_repx{self.rep}localmin_fullframe.npy")
        print(self.path + filename)
        np.save(self.path + filename, image_stack_array)
        del image_stack_array
Code example #11
0
File: MultiEELS_GUI.py  Project: Brow71189/MultiEELS
        def start_clicked():
            """Toggle acquisition: cancel when running, otherwise launch a run."""
            if self.__acquisition_running:
                self.MultiEELS.cancel()
                return
            # Wire up the controller and camera, then acquire on a worker
            # thread so the UI stays responsive.
            self.stem_controller = Registry.get_component('stem_controller')
            self.MultiEELS.stem_controller = self.stem_controller
            self.MultiEELS.camera = self.camera_choice_combo_box.current_item

            def run_multi_eels():
                data_dict = self.MultiEELS.acquire_multi_eels_spectrum()
                # Data item creation must occur on the UI thread.
                document_controller.queue_task(
                    lambda: self.create_result_data_item(data_dict))

            self.__acquisision_thread = threading.Thread(
                target=run_multi_eels, daemon=True)
            self.__acquisision_thread.start()
Code example #12
0
File: MultiEELS_GUI.py  Project: Brow71189/MultiEELS
 def start_si_clicked():
     """Toggle spectrum-image acquisition: cancel when running, else start it."""
     if self.__acquisition_running:
         self.MultiEELS.cancel()
         return
     # Wire up the controllers and cameras for a new acquisition.
     self.stem_controller = Registry.get_component('stem_controller')
     self.superscan = self.stem_controller.scan_controller
     self.MultiEELS.stem_controller = self.stem_controller
     self.MultiEELS.camera = self.camera_choice_combo_box.current_item
     self.MultiEELS.superscan = self.superscan
     self.result_data_items = {}
     self.__new_data_ready_event_listener = self.MultiEELS.new_data_ready_event.listen(
         self.add_to_display_queue)
     self.__data_processed_event.clear()
     # One thread drains the display queue; another runs the acquisition.
     self.__display_thread = threading.Thread(
         target=self.process_display_queue)
     self.__display_thread.start()
     self.__acquisision_thread = threading.Thread(
         target=self.MultiEELS.acquire_multi_eels_spectrum_image,
         daemon=True)
     self.__acquisision_thread.start()
Code example #13
0
    def __init__(self,
                 *,
                 is_eels: bool = False,
                 camera_exposure: float = 0.025):
        """Build a simulated STEM test harness: instrument, scan/camera sources, document controller.

        Args:
            is_eels: configure the camera hardware source as an EELS camera.
            camera_exposure: camera exposure time passed to the camera setup.
        """
        super().__init__()

        # Reference: the full application would run these (kept for context):
        # HardwareSource.run()
        # camera_base.run(configuration_location)
        # scan_base.run()
        # video_base.run()
        # CameraControlPanel.run()
        # ScanControlPanel.run()
        # MultipleShiftEELSAcquire.run()
        # VideoControlPanel.run()

        HardwareSource.run()
        instrument = self.setup_stem_controller()
        ScanDevice.run(typing.cast(InstrumentDevice.Instrument, instrument))
        scan_base.run()
        scan_hardware_source = Registry.get_component("scan_hardware_source")
        camera_hardware_source = self.setup_camera_hardware_source(
            instrument, camera_exposure, is_eels)
        # Reset the hardware source manager so only this harness's sources
        # are registered below.
        HardwareSource.HardwareSourceManager().hardware_sources = []
        HardwareSource.HardwareSourceManager(
        ).hardware_source_added_event = Event.Event()
        HardwareSource.HardwareSourceManager(
        ).hardware_source_removed_event = Event.Event()
        self.instrument = instrument
        self.scan_hardware_source = scan_hardware_source
        self.camera_hardware_source = camera_hardware_source
        HardwareSource.HardwareSourceManager().register_hardware_source(
            self.camera_hardware_source)
        HardwareSource.HardwareSourceManager().register_hardware_source(
            self.scan_hardware_source)
        self.document_controller = self.create_document_controller(
            auto_close=False)
        self.document_model = self.document_controller.document_model
        stem_controller.register_event_loop(
            self.document_controller.event_loop)
        # Objects appended here are closed in close().
        self.__exit_stack: typing.List[typing.Any] = list()
Code example #14
0
 def acquire(self, number_frames: int, energy_offset: float,
             sleep_time: int, dark_ref_choice: bool,
             dark_file: pathlib.Path, cross_cor_choice: bool) -> None:
     """Set up the workspace and launch a threaded EELS acquire-and-sum run.

     Does nothing when number_frames is non-positive or no EELS camera is
     selected.  The dark reference is read from dark_file only when
     dark_ref_choice is enabled; a missing/unreadable file yields None.
     """
     if number_frames <= 0:
         return
     camera = typing.cast(
         typing.Optional[camera_base.CameraHardwareSource],
         self.__eels_camera_choice.hardware_source)
     if not camera:
         return
     # Set up the workspace layout for acquisition.
     workspace_controller = self.document_controller.workspace_controller
     assert workspace_controller
     self.__configure_start_workspace(workspace_controller,
                                      camera.hardware_source_id)
     # Start the EELS acquisition.
     camera.start_playing()
     controller = typing.cast(
         typing.Optional[STEMController.STEMController],
         Registry.get_component("stem_controller"))
     assert controller
     # Import the dark reference when requested; fall back to None when
     # the file does not exist or no path was given.
     dark_ref_data = None
     if dark_ref_choice is not False:
         dark_ref_import = ImportExportManager.ImportExportManager(
         ).read_data_items(dark_file)
         if dark_ref_import:
             dark_ref_data = dark_ref_import[0].data
     AcquireController().start_threaded_acquire_and_sum(
         controller, camera, number_frames, energy_offset,
         sleep_time, dark_ref_choice, dark_ref_data, cross_cor_choice,
         self.document_controller,
         functools.partial(self.set_final_layout))
Code example #15
0
 def stem_controller(self) -> stem_controller.STEMController:
     """Return the STEM controller, resolving it from the registry on first use."""
     cached = self._stem_controller
     if cached is None:
         # Resolve once, then reuse the cached instance on later calls.
         cached = typing.cast(stem_controller.STEMController,
                              Registry.get_component('stem_controller'))
         self._stem_controller = cached
     return cached
 def stop_playing(self):
     """Stop the ronchigram camera acquisition."""
     controller = Registry.get_component("stem_controller")
     controller.ronchigram_camera.stop_playing()
Code example #17
0
def stop() -> None:
    """Unregister the scan device component from the registry."""
    scan_device = Registry.get_component("scan_device")
    Registry.unregister_component(scan_device, {"scan_device"})
Code example #18
0
    def create_panel_widget(
            self, ui: Facade.UserInterface,
            document_controller: Facade.DocumentWindow) -> Facade.ColumnWidget:
        stem_controller_ = typing.cast(
            stem_controller.STEMController,
            Registry.get_component("stem_controller"))

        self.__scan_hardware_source_choice_model = ui._ui.create_persistent_string_model(
            "scan_acquisition_hardware_source_id")
        self.__scan_hardware_source_choice = HardwareSourceChoice.HardwareSourceChoice(
            self.__scan_hardware_source_choice_model,
            lambda hardware_source: hardware_source.features.get(
                "is_scanning", False))
        self.__camera_hardware_source_choice_model = ui._ui.create_persistent_string_model(
            "scan_acquisition_camera_hardware_source_id")
        self.__camera_hardware_source_choice = HardwareSourceChoice.HardwareSourceChoice(
            self.__camera_hardware_source_choice_model,
            lambda hardware_source: hardware_source.features.get(
                "is_camera", False))

        self.__scan_hardware_source_stream = HardwareSourceChoice.HardwareSourceChoiceStream(
            self.__scan_hardware_source_choice).add_ref()
        self.__camera_hardware_source_stream = HardwareSourceChoice.HardwareSourceChoiceStream(
            self.__camera_hardware_source_choice).add_ref()

        def clear_scan_context_fields() -> None:
            self.__roi_description.text = _("Scan context not active")
            self.__scan_label_widget.text = None
            self.__scan_specifier.scan_context = stem_controller.ScanContext()
            self.__scan_specifier.scan_count = 1
            self.__scan_specifier.size = None
            self.__scan_specifier.drift_interval_lines = 0
            self.__scan_specifier.drift_interval_scans = 0
            self.__acquire_button._widget.enabled = self.__acquisition_state == SequenceState.scanning  # focus will be on the SI data, so enable if scanning
            self.__scan_pixels = 0

        def update_context() -> None:
            assert self.__scan_hardware_source_choice
            scan_hardware_source = typing.cast(
                scan_base.ScanHardwareSource,
                self.__scan_hardware_source_choice.hardware_source)
            if not scan_hardware_source:
                clear_scan_context_fields()
                return

            scan_context = scan_hardware_source.scan_context

            scan_context_size = scan_context.size
            exposure_ms = self.__exposure_time_ms_value_model.value or 0.0 if self.__exposure_time_ms_value_model else 0.0
            if scan_context.is_valid and scan_hardware_source.line_scan_enabled and scan_hardware_source.line_scan_vector:
                assert scan_context_size
                calibration = scan_context.calibration
                start = Geometry.FloatPoint.make(
                    scan_hardware_source.line_scan_vector[0])
                end = Geometry.FloatPoint.make(
                    scan_hardware_source.line_scan_vector[1])
                length = int(
                    Geometry.distance(start, end) * scan_context_size.height)
                max_dim = max(scan_context_size.width,
                              scan_context_size.height)
                length_str = calibration.convert_to_calibrated_size_str(
                    length, value_range=(0, max_dim), samples=max_dim)
                line_str = _("Line Scan")
                self.__roi_description.text = f"{line_str} {length_str} ({length} px)"
                scan_str = _("Scan (1D)")
                scan_length = max(self.__scan_width, 1)
                self.__scan_label_widget.text = f"{scan_str} {scan_length} px"
                self.__scan_pixels = scan_length
                self.__scan_specifier.scan_context = copy.deepcopy(
                    scan_context)
                self.__scan_specifier.scan_count = max(self.__scan_count, 1)
                self.__scan_specifier.size = 1, scan_length
                self.__scan_specifier.drift_interval_lines = 0
                self.__scan_specifier.drift_interval_scans = 0
                self.__acquire_button._widget.enabled = True
            elif scan_context.is_valid and scan_hardware_source.subscan_enabled and scan_hardware_source.subscan_region:
                assert scan_context_size
                calibration = scan_context.calibration
                width = scan_hardware_source.subscan_region.width * scan_context_size.width
                height = scan_hardware_source.subscan_region.height * scan_context_size.height
                width_str = calibration.convert_to_calibrated_size_str(
                    width,
                    value_range=(0, scan_context_size.width),
                    samples=scan_context_size.width)
                height_str = calibration.convert_to_calibrated_size_str(
                    height,
                    value_range=(0, scan_context_size.height),
                    samples=scan_context_size.height)
                rect_str = _("Subscan")
                self.__roi_description.text = f"{rect_str} {width_str} x {height_str} ({int(width)} px x {int(height)} px)"
                scan_str = _("Scan (2D)")
                scan_width = self.__scan_width
                scan_height = int(self.__scan_width * height / width)
                drift_lines = scan_hardware_source.calculate_drift_lines(
                    scan_width, exposure_ms /
                    1000) if scan_hardware_source else 0
                drift_str = f" / Drift {drift_lines} lines" if drift_lines > 0 else str(
                )
                drift_scans = scan_hardware_source.calculate_drift_scans()
                drift_str = f" / Drift {drift_scans} scans" if drift_scans > 0 else drift_str
                self.__scan_label_widget.text = f"{scan_str} {scan_width} x {scan_height} px" + drift_str
                self.__scan_pixels = scan_width * scan_height
                self.__scan_specifier.scan_context = copy.deepcopy(
                    scan_context)
                self.__scan_specifier.scan_count = max(self.__scan_count, 1)
                self.__scan_specifier.size = scan_height, scan_width
                self.__scan_specifier.drift_interval_lines = drift_lines
                self.__scan_specifier.drift_interval_scans = drift_scans
                self.__acquire_button._widget.enabled = True
            elif scan_context.is_valid:
                assert scan_context_size
                calibration = scan_context.calibration
                width = scan_context_size.width
                height = scan_context_size.height
                width_str = calibration.convert_to_calibrated_size_str(
                    width,
                    value_range=(0, scan_context_size.width),
                    samples=scan_context_size.width)
                height_str = calibration.convert_to_calibrated_size_str(
                    height,
                    value_range=(0, scan_context_size.height),
                    samples=scan_context_size.height)
                data_str = _("Context Scan")
                self.__roi_description.text = f"{data_str} {width_str} x {height_str} ({int(width)} x {int(height)})"
                scan_str = _("Scan (2D)")
                scan_width = self.__scan_width
                scan_height = int(self.__scan_width * height / width)
                drift_lines = scan_hardware_source.calculate_drift_lines(
                    scan_width, exposure_ms /
                    1000) if scan_hardware_source else 0
                drift_str = f" / Drift {drift_lines} lines" if drift_lines > 0 else str(
                )
                drift_scans = scan_hardware_source.calculate_drift_scans()
                drift_str = f" / Drift {drift_scans} scans" if drift_scans > 0 else drift_str
                self.__scan_label_widget.text = f"{scan_str} {scan_width} x {scan_height} px" + drift_str
                self.__scan_pixels = scan_width * scan_height
                self.__scan_specifier.scan_context = copy.deepcopy(
                    scan_context)
                self.__scan_specifier.scan_count = max(self.__scan_count, 1)
                self.__scan_specifier.size = scan_height, scan_width
                self.__scan_specifier.drift_interval_lines = drift_lines
                self.__scan_specifier.drift_interval_scans = drift_scans
                self.__acquire_button._widget.enabled = True
            else:
                clear_scan_context_fields()

            self.__scan_count_widget.text = Converter.IntegerToStringConverter(
            ).convert(self.__scan_count)

            self.__scan_width_widget.text = Converter.IntegerToStringConverter(
            ).convert(self.__scan_width)

            self.__update_estimate()

        def stem_controller_property_changed(key: str) -> None:
            if key in ("subscan_state", "subscan_region", "subscan_rotation",
                       "line_scan_state", "line_scan_vector",
                       "drift_channel_id", "drift_region", "drift_settings"):
                document_controller._document_controller.event_loop.call_soon_threadsafe(
                    update_context)

        def scan_context_changed() -> None:
            # this can be triggered from a thread, so use call soon to transfer it to the UI thread.
            document_controller._document_controller.event_loop.call_soon_threadsafe(
                update_context)

        self.__stem_controller_property_listener = None
        self.__scan_context_changed_listener = None

        if stem_controller_:
            self.__stem_controller_property_listener = stem_controller_.property_changed_event.listen(
                stem_controller_property_changed)
            self.__scan_context_changed_listener = stem_controller_.scan_context_changed_event.listen(
                scan_context_changed)

        column = ui.create_column_widget()

        self.__styles_list_model = ListModel.ListModel[
            ScanAcquisitionProcessing](items=[
                ScanAcquisitionProcessing.SUM_PROJECT,
                ScanAcquisitionProcessing.NONE
            ])
        self.__styles_list_property_model = ListModel.ListPropertyModel(
            self.__styles_list_model)
        self.__style_combo_box = ui.create_combo_box_widget(
            self.__styles_list_property_model.value,
            item_text_getter=operator.attrgetter("value.display_name"))
        self.__style_combo_box._widget.set_property("min-width", 100)
        items_binding = Binding.PropertyBinding(
            self.__styles_list_property_model, "value")
        items_binding.source_setter = None
        typing.cast(UserInterfaceModule.ComboBoxWidget,
                    self.__style_combo_box._widget).bind_items(items_binding)
        self.__style_combo_box.current_index = 0

        self.__acquire_button = ui.create_push_button_widget(_("Acquire"))

        self.__progress_bar = ui.create_progress_bar_widget()
        # self.__progress_bar.enabled = False

        self.__roi_description = ui.create_label_widget()

        self.__scan_count_widget = ui.create_line_edit_widget()
        self.__scan_count_widget._widget.set_property("width", 72)

        self.__scan_processing_widget = ui.create_combo_box_widget(
            items=["Raw", "Sum", "Raw + Sum"])

        self.__scan_width_widget = ui.create_line_edit_widget()

        self.__exposure_time_widget = ui.create_line_edit_widget()

        self.__estimate_label_widget = ui.create_label_widget()

        self.__scan_label_widget = ui.create_label_widget()

        class ComboBoxWidget:
            # Small adapter that wraps a raw ComboBoxWidget so it exposes the
            # `_widget` property expected by the declarative row containers.
            def __init__(self,
                         widget: UserInterfaceModule.ComboBoxWidget) -> None:
                self.__combo_box_widget = widget

            @property
            def _widget(self) -> UserInterfaceModule.ComboBoxWidget:
                # Return the wrapped low-level combo box widget.
                return self.__combo_box_widget

        camera_row = ui.create_row_widget()
        camera_row.add_spacing(12)
        camera_row.add(
            ComboBoxWidget(
                self.__camera_hardware_source_choice.create_combo_box(ui._ui)))
        camera_row.add_spacing(12)
        camera_row.add(self.__style_combo_box)
        camera_row.add_spacing(12)
        camera_row.add_stretch()

        scan_choice_row = ui.create_row_widget()
        scan_choice_row.add_spacing(12)
        scan_choice_row.add(
            ComboBoxWidget(
                self.__scan_hardware_source_choice.create_combo_box(ui._ui)))
        scan_choice_row.add_spacing(12)
        scan_choice_row.add_stretch()

        scan_count_row = ui.create_row_widget()
        scan_count_row.add_spacing(12)
        scan_count_row.add(ui.create_label_widget("Scan Count"))
        scan_count_row.add_spacing(12)
        scan_count_row.add(self.__scan_count_widget)
        scan_count_row.add_spacing(12)
        scan_count_row.add(self.__scan_processing_widget)
        scan_count_row.add_spacing(12)
        scan_count_row.add_stretch()

        roi_size_row = ui.create_row_widget()
        roi_size_row.add_spacing(12)
        roi_size_row.add(self.__roi_description)
        roi_size_row.add_spacing(12)
        roi_size_row.add_stretch()

        scan_spacing_pixels_row = ui.create_row_widget()
        scan_spacing_pixels_row.add_spacing(12)
        scan_spacing_pixels_row.add(
            ui.create_label_widget("Scan Width (pixels)"))
        scan_spacing_pixels_row.add_spacing(12)
        scan_spacing_pixels_row.add(self.__scan_width_widget)
        scan_spacing_pixels_row.add_spacing(12)
        scan_spacing_pixels_row.add_stretch()

        eels_exposure_row = ui.create_row_widget()
        eels_exposure_row.add_spacing(12)
        eels_exposure_row.add(
            ui.create_label_widget("Camera Exposure Time (ms)"))
        eels_exposure_row.add_spacing(12)
        eels_exposure_row.add(self.__exposure_time_widget)
        eels_exposure_row.add_spacing(12)
        eels_exposure_row.add_stretch()

        scan_row = ui.create_row_widget()
        scan_row.add_spacing(12)
        scan_row.add(self.__scan_label_widget)
        scan_row.add_stretch()

        estimate_row = ui.create_row_widget()
        estimate_row.add_spacing(12)
        estimate_row.add(self.__estimate_label_widget)
        estimate_row.add_stretch()

        acquire_sequence_button_row = ui.create_row_widget()
        acquire_sequence_button_row.add(self.__acquire_button)
        acquire_sequence_button_row.add_spacing(8)
        acquire_sequence_button_row.add(self.__progress_bar)
        acquire_sequence_button_row.add_spacing(8)

        if self.__scan_hardware_source_choice.hardware_source_count > 1:
            column.add_spacing(8)
            column.add(scan_choice_row)
        column.add_spacing(8)
        column.add(camera_row)
        column.add_spacing(8)
        column.add(scan_count_row)
        column.add_spacing(8)
        column.add(roi_size_row)
        column.add_spacing(8)
        column.add(scan_spacing_pixels_row)
        column.add_spacing(8)
        column.add(eels_exposure_row)
        column.add_spacing(8)
        column.add(scan_row)
        column.add_spacing(8)
        column.add(estimate_row)
        column.add_spacing(8)
        column.add(acquire_sequence_button_row)
        column.add_spacing(8)
        column.add_stretch()

        def camera_hardware_source_changed(
            hardware_source: typing.Optional[HardwareSource.HardwareSource]
        ) -> None:
            # Rebind listeners to the newly selected camera and rebuild the
            # processing-style choices; cameras advertising the
            # "has_masked_sum_option" feature get an extra SUM_MASKED entry.
            styles_list_model = self.__styles_list_model
            self.disconnect_camera_hardware_source()
            if hardware_source and styles_list_model:
                self.connect_camera_hardware_source(hardware_source)
                if hardware_source.features.get("has_masked_sum_option"):
                    styles_list_model.items = [
                        ScanAcquisitionProcessing.SUM_PROJECT,
                        ScanAcquisitionProcessing.NONE,
                        ScanAcquisitionProcessing.SUM_MASKED
                    ]
                else:
                    styles_list_model.items = [
                        ScanAcquisitionProcessing.SUM_PROJECT,
                        ScanAcquisitionProcessing.NONE
                    ]

        self.__camera_hardware_changed_event_listener = self.__camera_hardware_source_choice.hardware_source_changed_event.listen(
            camera_hardware_source_changed)
        camera_hardware_source_changed(
            self.__camera_hardware_source_choice.hardware_source)

        def style_current_item_changed(current_item: str) -> None:
            # Recompute the time/size estimate when the processing style changes.
            self.__update_estimate()

        self.__style_combo_box.on_current_item_changed = style_current_item_changed

        def scan_count_changed(text: str) -> None:
            # Parse the edited text as an int; fall back to 1 on empty/invalid
            # input and clamp to >= 1 before updating the acquisition context.
            scan_count = Converter.IntegerToStringConverter().convert_back(
                text) or 1
            scan_count = max(scan_count, 1)
            if scan_count != self.__scan_count:
                self.__scan_count = scan_count
                update_context()
            self.__scan_count_widget.request_refocus()

        self.__scan_count_widget.on_editing_finished = scan_count_changed

        def scan_width_changed(text: str) -> None:
            # Parse the edited text as an int; fall back to 1 on empty/invalid
            # input and clamp to >= 1 before updating the acquisition context.
            # (Mirrors scan_count_changed above.)
            scan_width = Converter.IntegerToStringConverter().convert_back(
                text) or 1
            scan_width = max(scan_width, 1)
            if scan_width != self.__scan_width:
                self.__scan_width = scan_width
                update_context()
            self.__scan_width_widget.request_refocus()

        self.__scan_width_widget.on_editing_finished = scan_width_changed

        def acquisition_state_changed(
                acquisition_state: SequenceState) -> None:
            # Record the new acquisition state and keep the UI (button text,
            # progress bar, cancel availability) in sync. UI mutations are
            # marshalled onto the window's event loop via create_task.
            self.__acquisition_state = acquisition_state

            async def update_state(is_idle: bool) -> None:
                # The acquire button doubles as a cancel button while busy.
                self.__acquire_button.text = _("Acquire") if is_idle else _(
                    "Cancel")
                # self.__progress_bar.enabled = not is_idle
                update_context()  # update the cancel button
                if is_idle and self.__progress_task:
                    # Acquisition finished: stop polling and show 100%.
                    self.__progress_task.cancel()
                    self.__progress_task = None
                    self.__progress_bar.value = 100
                if not is_idle and not self.__progress_task:

                    async def update_progress() -> None:
                        # Poll controller progress ~4x per second while the
                        # acquisition runs; task is cancelled when idle.
                        while True:
                            if self.__scan_acquisition_controller:
                                self.__progress_bar.value = int(
                                    100 *
                                    self.__scan_acquisition_controller.progress
                                )
                            await asyncio.sleep(0.25)

                    self.__progress_task = document_controller._document_window.event_loop.create_task(
                        update_progress())

            if acquisition_state == SequenceState.idle:
                # Tear down the controller and its listener before updating UI.
                self.__scan_acquisition_controller = None
                if self.__acquisition_state_changed_event_listener:
                    self.__acquisition_state_changed_event_listener.close()
                    self.__acquisition_state_changed_event_listener = None
                document_controller._document_window.event_loop.create_task(
                    update_state(True))
            else:
                document_controller._document_window.event_loop.create_task(
                    update_state(False))

        def acquire_sequence() -> None:
            # Toggle handler for the acquire button: cancel a running
            # acquisition, otherwise resolve the hardware sources and start one.
            if self.__scan_acquisition_controller:
                # Acquisition in progress: the button acts as Cancel.
                # (Removed a redundant duplicated nested check of the same
                # condition that was here.)
                self.__scan_acquisition_controller.cancel()
            else:
                # Resolve the scan hardware source through the plug-in API.
                scan_hardware_source_choice = self.__scan_hardware_source_choice
                assert scan_hardware_source_choice
                if scan_hardware_source_choice.hardware_source:
                    scan_hardware_source = self.__api.get_hardware_source_by_id(
                        scan_hardware_source_choice.hardware_source.
                        hardware_source_id,
                        version="1.0")
                else:
                    scan_hardware_source = None

                # Resolve the camera hardware source through the plug-in API.
                camera_hardware_source_choice = self.__camera_hardware_source_choice
                assert camera_hardware_source_choice
                if camera_hardware_source_choice.hardware_source:
                    camera_hardware_source = self.__api.get_hardware_source_by_id(
                        camera_hardware_source_choice.hardware_source.
                        hardware_source_id,
                        version="1.0")
                else:
                    camera_hardware_source = None

                if scan_hardware_source and camera_hardware_source:
                    self.__scan_acquisition_controller = ScanAcquisitionController(
                        self.__api, document_controller, scan_hardware_source,
                        camera_hardware_source, self.__scan_specifier)
                    self.__acquisition_state_changed_event_listener = self.__scan_acquisition_controller.acquisition_state_changed_event.listen(
                        acquisition_state_changed)
                    # Scan processing combo index: 0 = Raw, 1 = Sum, 2 = Raw + Sum.
                    scan_processing = ScanProcessing(
                        self.__scan_processing_widget.current_index in (0, 2),
                        self.__scan_processing_widget.current_index in (1, 2))
                    scan_acquisition_processing = self.__style_combo_box.current_item if self.__style_combo_box and self.__style_combo_box.current_item else ScanAcquisitionProcessing.NONE
                    self.__scan_acquisition_controller.start(
                        scan_acquisition_processing, scan_processing)

        self.__acquire_button.on_clicked = acquire_sequence

        self.__update_estimate()

        update_context()

        return column
コード例 #19
0
 def stem_controller(self):
     """Return the STEM controller, resolving it from the Registry on first use."""
     controller = self._stem_controller
     if controller is None:
         controller = Registry.get_component('stem_controller')
         self._stem_controller = controller
     return controller
コード例 #20
0
ファイル: Map4D.py プロジェクト: tcpekin/libertem-hackaton
    def menu_item_execute(self, window: API.DocumentWindow) -> None:
        """Create a 'Map 4D' computation for the currently selected data item.

        Takes the first data item of the selected display item, converts it
        for LiberTEM processing if it lacks 'libertem-io' metadata, creates
        the target data item and the 'nion.libertem.map_4d' computation,
        adds a 'Pick' graphic, and persists the involved data item UUIDs.
        """
        document_controller = window._document_controller
        selected_display_item = document_controller.selected_display_item
        data_item = (selected_display_item.data_items[0]
                     if selected_display_item
                     and len(selected_display_item.data_items) > 0 else None)

        if data_item:
            api_data_item = Facade.DataItem(data_item)
            ds = None
            # Convert to a LiberTEM dataset when the item has not been adapted yet.
            if not api_data_item.xdata.metadata.get('libertem-io'):
                executor = Registry.get_component('libertem_executor')
                if not executor:
                    return
                ds = LiberTEMAdapter(
                    self.__api,
                    executor).niondata_to_libertemdata(api_data_item)
                if not api_data_item.xdata.metadata.get('libertem-io'):
                    self.__show_tool_tips('wrong_shape')
                    return
            map_data_item = self.__api.library.create_data_item(
                title='Map 4D of ' + data_item.title)
            display_item = document_controller.document_model.get_display_item_for_data_item(
                map_data_item._data_item)
            show_display_item(window, display_item)
            # Graphics with the 'mask' role define the integration regions.
            map_regions = list()
            for graphic in api_data_item.graphics:
                if graphic._graphic.role == 'mask':
                    map_regions.append(graphic)
            computation = self.__api.library.create_computation(
                'nion.libertem.map_4d',
                inputs={
                    'src': api_data_item,
                    'map_regions': map_regions
                },
                outputs={'target': map_data_item})
            computation._computation.source = data_item
            if ds is not None:
                computation._computation.ds = ds

            # FIX: pass the underlying data item, not the Facade wrapper,
            # so the display item lookup succeeds (consistent with the
            # identical lookup above).
            map_display_item = document_controller.document_model.get_display_item_for_data_item(
                map_data_item._data_item)
            document_controller.show_display_item(map_display_item)
            pick_graphic = map_data_item.add_point_region(0.5, 0.5)
            pick_graphic.label = 'Pick'

            # Connect the pick graphic on a background thread so the UI stays
            # responsive while the connection polls (30 = timeout/retries;
            # see __connect_pick_graphic).
            threading.Thread(target=self.__connect_pick_graphic,
                             args=(api_data_item, map_data_item, pick_graphic,
                                   computation._computation, 30),
                             daemon=True).start()

            self.__computation_data_items.update({
                str(data_item.uuid):
                'source',
                str(map_data_item._data_item.uuid):
                'map_4d'
            })
            # Persist the source/target mapping so it can be restored on relaunch.
            self.__api.application.document_controllers[
                0]._document_controller.ui.set_persistent_string(
                    'libertem_map4d_data_items_0',
                    json.dumps(self.__computation_data_items))
            self.__show_tool_tips()
コード例 #21
0
def hardware_source_manager() -> HardwareSourceManagerInterface:
    """Look up and return the registered hardware source manager component."""
    manager = Registry.get_component("hardware_source_manager")
    return typing.cast(HardwareSourceManagerInterface, manager)
コード例 #22
0
 def scan_controller(self) -> HardwareSource.HardwareSource:
     """Return the cached scan controller, or look one up in the Registry."""
     controller = self.__scan_controller
     if not controller:
         controller = Registry.get_component("scan_hardware_source")
     return controller
    def __init__(self,
                 dev_ids,
                 start_point=None,
                 CNNoption=1,
                 CNNpath='',
                 act_list=None,
                 readDefault=False):
        """Set up the Nion machine interface.

        Args:
            dev_ids: device/control identifiers to optimize over.
            start_point: optional initial control values; defaults to zeros.
            CNNoption: 1 to load the CNN objective model, anything else to skip.
            CNNpath: filesystem path of the CNN model.
            act_list: list of active aberration coefficients; defaults to an
                empty list (was a mutable default argument, now a None sentinel).
            readDefault: when True, read the current machine values as defaults.
        """
        # Basic setups
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # specify which GPU to use
        self.pvs = np.array(dev_ids)
        self.name = 'Nion'
        self.CNNoption = CNNoption

        # initialize aberration list, this has to come before setting aberrations
        self.abr_list = [
            "C10", "C12.x", "C12.y", "C21.x", "C21.y", "C23.x", "C23.y", "C30",
            "C32.x", "C32.y", "C34.x", "C34.y"
        ]
        self.default = [
            2e-9, 2e-9, 2e-9, 20e-9, 20e-9, 20e-9, 20e-9, 0.5e-6, 0.5e-6,
            0.5e-6, 0.5e-6, 0.5e-6
        ]
        self.abr_lim = [
            2e-6, 2e-6, 2e-6, 3e-5, 3e-5, 3e-5, 3e-5, 4e-4, 3e-4, 3e-4, 2e-4,
            2e-4
        ]
        # avoid sharing one list across instances (mutable default argument bug)
        self.activate = [] if act_list is None else act_list

        # option to read existing default value, can be used when running experiment
        self.readDefault = readDefault
        self.aperture = 0

        # Initialize stem controller
        self.stem_controller = Registry.get_component("stem_controller")
        for i, abr_coeff in enumerate(self.abr_list):
            _, val = self.stem_controller.TryGetVal(abr_coeff)
            if self.readDefault:
                self.default[i] = val
            print(abr_coeff + ' successfully loaded.')

        # Connect to ronchigram camera and setup camera parameters
        self.ronchigram = self.stem_controller.ronchigram_camera
        frame_parameters = self.ronchigram.get_current_frame_parameters()
        frame_parameters["binning"] = 1
        frame_parameters["exposure_ms"] = 50  # TODO, change to a variable

        # define a variable to save the frame acquired from camera
        self.size = 128
        self.frame = np.zeros([self.size, self.size])

        # Load the CNN model for objective prediction
        if CNNoption == 1:
            # NOTE: the original wrapped this in
            # threading.Thread(target=self.loadCNN(CNNpath)), which invoked
            # loadCNN synchronously during argument evaluation and built a
            # Thread that was never started; call it directly instead.
            self.loadCNN(CNNpath)
            gpus = tf.config.experimental.list_physical_devices('GPU')
            if gpus:
                try:
                    # allow memory growth so TF does not grab all GPU memory
                    for gpu in gpus:
                        tf.config.experimental.set_memory_growth(gpu, True)
                except RuntimeError as e:
                    print(e)

        if start_point is None:
            # replace with expression that reads current ctrl pv values (x)
            # from machine; zeros are a placeholder
            current_x = np.zeros(len(self.pvs))
            self.setX(current_x)
        else:
            self.setX(start_point)
コード例 #24
0
def acquire_multi_eels(interactive, api):
    """Acquire EELS spectra at several energy offsets and display them stacked.

    For each (energy offset eV, exposure ms, frame count) entry in the table,
    integrate a sequence of camera frames into one spectrum, optionally dark
    subtract (beam blanked) and gain normalize, then pad all spectra onto a
    common energy axis and show them as rows of a single data item.
    Dark subtraction and gain normalization should be disabled in the camera
    settings since this function handles them itself.
    """
    # first grab the stem controller object by asking the Registry
    stem_controller = Registry.get_component("stem_controller")

    # establish the EELS camera object and stop it if it is playing
    eels_camera = stem_controller.eels_camera
    eels_camera.stop_playing()

    print(eels_camera.hardware_source_id)

    # this table represents the acquisitions to be performed
    # each entry is energy offset, exposure (milliseconds), and the number of frames to integrate
    table = [
        # energy offset, exposure(ms), N frames
        (0, 100, 2),
        (10, 100, 2),
        #(250, 1000, 10),
        #(0, 100, 5),
    ]

    # this is the list of integrated spectra that will be the result of this script
    spectra = list()

    # this algorithm handles dark subtraction specially - so dark subtraction and gain normalization should
    # be disabled in the camera settings; this algorithm will handle dark subtraction itself.
    do_dark = True
    do_gain = False

    print("start taking data")

    energy_offset_control = "EELS_MagneticShift_Offset"  # for hardware EELS
    # energy_offset_control = "EELS_MagneticShift_Offset"  # for simulator
    # NOTE(review): the hardware and simulator control names above are
    # identical - confirm whether the simulator uses a different control.

    tolerance_factor_from_nominal = 1.0
    timeout_for_confirmation_ms = 3000

    for energy_offset_ev, exposure_ms, frame_count in table:
        # for each table entry, set the drift tube loss to the energy offset
        stem_controller.SetValAndConfirm(energy_offset_control,
                                         energy_offset_ev,
                                         tolerance_factor_from_nominal,
                                         timeout_for_confirmation_ms)

        # configure the camera to have the desired exposure
        frame_parameters = eels_camera.get_current_frame_parameters()
        frame_parameters["exposure_ms"] = exposure_ms
        eels_camera.set_current_frame_parameters(frame_parameters)

        # disable blanker
        stem_controller.SetValAndConfirm("C_Blank", 0,
                                         tolerance_factor_from_nominal,
                                         timeout_for_confirmation_ms)

        # acquire a sequence of images and discard it; this ensures a steady state
        eels_camera.grab_sequence_prepare(frame_count)
        eels_camera.grab_sequence(frame_count)

        # acquire a sequence of images again, but now integrate the acquired images into a single image
        eels_camera.grab_sequence_prepare(frame_count)
        xdata = eels_camera.grab_sequence(frame_count)[0]

        print(f"grabbed data of shape {xdata.data_shape}")

        # extract the calibration info
        counts_per_electron = xdata.metadata.get("hardware_source",
                                                 dict()).get(
                                                     "counts_per_electron", 1)
        # NOTE(review): this rebinds the loop variable exposure_ms with the
        # metadata "exposure" value (presumably seconds per frame) - confirm
        # units; the title below multiplies by 1000 accordingly.
        exposure_ms = xdata.metadata.get("hardware_source",
                                         dict()).get("exposure", 1)
        # scale so the spectrum is calibrated in electrons per eV per second
        intensity_scale = xdata.intensity_calibration.scale / counts_per_electron / xdata.dimensional_calibrations[
            -1].scale / exposure_ms / frame_count

        # now sum the data in the sequence/time dimension. use xd.sum to automatically handle metadata such as calibration.
        xdata = xd.sum(xdata, 0)

        # if dark subtraction is enabled, perform another similar acquisition with blanker enabled and subtract it
        if do_dark:
            # enable blanker
            stem_controller.SetValAndConfirm("C_Blank", 1,
                                             tolerance_factor_from_nominal,
                                             timeout_for_confirmation_ms)

            # acquire a sequence of images and discard it; this ensures a steady state
            eels_camera.grab_sequence_prepare(frame_count)
            eels_camera.grab_sequence(frame_count)

            # acquire a sequence of images again, but now integrate the acquired images into a single image
            eels_camera.grab_sequence_prepare(frame_count)
            dark_xdata = eels_camera.grab_sequence(frame_count)[0]

            # sum it and subtract it from xdata
            dark_xdata = xd.sum(dark_xdata, 0)
            xdata = xdata - dark_xdata

            print(f"subtracted dark data of shape {dark_xdata.data_shape}")
        if do_gain:
            # divide out the gain
            gain_uuid = uuid.uuid4(
            )  # fill this in with the actual gain image uuid
            gain = interactive.document_controller.document_model.get_data_item_by_uuid(
                gain_uuid)
            if gain is not None:
                xdata = xdata / gain.xdata

        # next sum the 2d data into a 1d spectrum by collapsing the y-axis (0th dimension)
        # also configure the intensity calibration and title.
        spectrum = xd.sum(xdata, 0)
        spectrum.data_metadata._set_intensity_calibration(
            Calibration.Calibration(scale=intensity_scale, units="e/eV/s"))
        spectrum.data_metadata._set_metadata({
            "title":
            f"{energy_offset_ev}eV {int(exposure_ms*1000)}ms [x{frame_count}]"
        })

        # add it to the list of spectra
        spectra.append(spectrum)

    # disable blanking and return drift tube loss to 0.0eV
    stem_controller.SetValAndConfirm("C_Blank", 0,
                                     tolerance_factor_from_nominal,
                                     timeout_for_confirmation_ms)
    stem_controller.SetValAndConfirm(energy_offset_control, 0,
                                     tolerance_factor_from_nominal,
                                     timeout_for_confirmation_ms)

    print("finished taking data")

    # when multi display is available, we can combine the spectra into a single line plot display without
    # padding the data; but for now, we need to use a single master data item where each row is the same length.

    if len(spectra) > 0:
        # define the padded spectra list
        padded_spectra = list()

        # extract calibration info
        ev_per_channel = spectra[0].dimensional_calibrations[-1].scale
        units = spectra[0].dimensional_calibrations[-1].units
        # overall energy range covered by all spectra, in calibrated units
        min_ev = min([
            spectrum.dimensional_calibrations[-1].convert_to_calibrated_value(
                0) for spectrum in spectra
        ])
        max_ev = max([
            spectrum.dimensional_calibrations[-1].convert_to_calibrated_value(
                spectrum.data_shape[-1]) for spectrum in spectra
        ])

        # calculate what the length of the padded data will be
        data_length = int((max_ev - min_ev) / ev_per_channel)

        # for each spectra, pad it out to the appropriate length, putting the actual data in the proper range
        for spectrum in spectra:
            # NOTE(review): despite the name, this is a channel offset into
            # the padded array, not an energy in eV.
            energy_offset_ev = int(
                (spectrum.dimensional_calibrations[-1].
                 convert_to_calibrated_value(0) - min_ev) / ev_per_channel)
            # rescale each spectrum to the intensity scale of the first one
            calibration_factor = spectrum.intensity_calibration.scale / spectra[
                0].intensity_calibration.scale
            data = numpy.zeros((data_length, ))
            data[energy_offset_ev:energy_offset_ev +
                 spectrum.data_shape[-1]] = spectrum.data * calibration_factor
            padded_spectrum = DataAndMetadata.new_data_and_metadata(
                data, spectrum.intensity_calibration,
                [Calibration.Calibration(min_ev, ev_per_channel, units)])
            padded_spectra.append(padded_spectrum)

        # stack all of the padded data together for display
        master_xdata = xd.vstack(padded_spectra)

        # show the data
        window = api.application.document_windows[0]
        data_item = api.library.create_data_item_from_data_and_metadata(
            master_xdata)
        legends = [s.metadata["title"] for s in spectra]
        data_item.title = f"MultiEELS ({', '.join(legends)})"
        window.display_data_item(data_item)

    print("finished")
コード例 #25
0
 def stem_controller(
         self) -> typing.Optional[stem_controller.STEMController]:
     """Return the STEM controller, resolved lazily via the Registry and cached."""
     cached = self._stem_controller
     if cached is None:
         cached = Registry.get_component('stem_controller')
         self._stem_controller = cached
     return cached
コード例 #26
0
    def execute(self, src, map_regions):
        """Schedule the 4D map computation for *src* with mask *map_regions*.

        Skips re-execution when the source and regions are unchanged, builds
        a combined boolean mask from the regions (all-ones when none are
        given), cancels any in-flight task, and queues an ApplyMasksUDF run
        on the LiberTEM executor. Errors are printed (best-effort execution).
        """
        try:
            # Skip when inputs are identical to the previous run.
            if hasattr(self.computation._computation,
                       'last_src_uuid') and hasattr(
                           self.computation._computation, 'last_map_regions'):
                map_regions_ = [
                    region.persistent_dict for region in map_regions
                ]
                if str(
                        src.uuid
                ) == self.computation._computation.last_src_uuid and map_regions_ == self.computation._computation.last_map_regions:
                    return
            metadata = copy.deepcopy(src.xdata.metadata)
            libertem_metadata = metadata.get('libertem-io')
            if libertem_metadata is None:
                return
            executor = Registry.get_component('libertem_executor')
            if executor is None:
                logging.error(
                    'No libertem executor could be retrieved from the Registry.'
                )
                return
            file_parameters = libertem_metadata['file_parameters']
            file_type = file_parameters.pop('type')
            shape = src.xdata.data_shape
            if map_regions:
                # Combine all mask regions into one boolean mask.
                # FIX: np.bool was removed in NumPy 1.24; use builtin bool.
                mask_data = np.zeros(shape, dtype=bool)
                for region in map_regions:
                    np.logical_or(mask_data,
                                  region.get_mask(shape),
                                  out=mask_data)
            else:
                # No regions: integrate over the whole frame.
                mask_data = np.ones(shape, dtype=bool)

            ds = dataset.load(file_type, executor.ensure_sync(),
                              **file_parameters)
            udf = ApplyMasksUDF(mask_factories=[lambda: mask_data])
            dc = self.__api.application.document_controllers[
                0]._document_controller
            # Cancel any previous run before starting a new one.
            if hasattr(self.computation._computation, 'cancel_id'):
                print(
                    f'Cancelling task: {self.computation._computation.cancel_id}'
                )
                to_cancel = self.computation._computation.cancel_id
                self.__api.queue_task(lambda: self.__event_loop.create_task(
                    executor.cancel(to_cancel)))
                #self.computation._computation.cancel_id = None
            self.computation._computation.cancel_id = str(time.time())
            print(f'Creating task: {self.computation._computation.cancel_id}')
            dc.add_task(
                'libertem-map4d', lambda: self.__event_loop.create_task(
                    self.run_udf(udf,
                                 self.computation._computation.cancel_id,
                                 executor,
                                 dataset=ds)))
            # Remember the inputs so identical re-runs are skipped above.
            self.computation._computation.last_src_uuid = str(src.uuid)
            self.computation._computation.last_map_regions = copy.deepcopy(
                [region.persistent_dict for region in map_regions])

        except Exception as e:
            print(str(e))
            import traceback
            traceback.print_exc()
コード例 #27
0
 def ronchigram_camera(self) -> HardwareSource.HardwareSource:
     """Return the cached ronchigram camera, or look one up in the Registry."""
     camera = self.__ronchigram_camera
     if not camera:
         camera = Registry.get_component("ronchigram_camera_hardware_source")
     return camera
コード例 #28
0
def get_context():
    """Create a LiberTEM Context backed by the registered executor."""
    executor = Registry.get_component('libertem_executor')
    sync_executor = executor.ensure_sync()
    return api.Context(executor=sync_executor)
コード例 #29
0
 def eels_camera(self) -> HardwareSource.HardwareSource:
     """Return the cached EELS camera, or look one up in the Registry."""
     camera = self.__eels_camera
     if not camera:
         camera = Registry.get_component("eels_camera_hardware_source")
     return camera